1. The code is as follows:

doubanmovie.py

# -*- coding: utf-8 -*-
import scrapy
from douban.items import DoubanItem


class DoubamovieSpider(scrapy.Spider):
    name = "doubanmovie"
    allowed_domains = ["movie.douban.com"]
    offset = 0
    url = "https://movie.douban.com/top250?start="
    start_urls = (
        url + str(offset),
    )

    def parse(self, response):
        item = DoubanItem()
        movies = response.xpath("//div[@class='info']")
        for each in movies:
            # Title
            item['title'] = each.xpath(".//span[@class='title'][1]/text()").extract()[0]
            # Details (director, cast, year, genre)
            item['bd'] = each.xpath(".//div[@class='bd']/p/text()").extract()[0]
            # Rating
            item['star'] = each.xpath(".//div[@class='star']/span[@class='rating_num']/text()").extract()[0]
            # One-line quote
            quote = each.xpath(".//p[@class='quote']/span/text()").extract()
            if len(quote) != 0:
                item['quote'] = quote[0]
            yield item
        # Schedule the next page until the last offset (225) has been requested
        if self.offset < 225:
            self.offset += 25
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
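Because `offset` lives on the spider instance, every in-flight request shares the same counter; this works here only because each page schedules exactly one follow-up request. As an alternative (a sketch, not part of the original project, and assuming Douban still renders its paginator as a span with class "next"), the next page can be derived from the response itself:

    def parse(self, response):
        # ... item extraction as above ...
        # Follow the site's own "next page" link instead of keeping a counter
        next_page = response.xpath("//span[@class='next']/a/@href").extract_first()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)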

items.py

import scrapy

class DoubanItem(scrapy.Item):
    # define the fields for your item here like:
    # Title
    title = scrapy.Field()
    # Details
    bd = scrapy.Field()
    # Rating
    star = scrapy.Field()
    # Quote
    quote = scrapy.Field()

2. Set the storage destination in the pipeline file

import pymongo
from scrapy.conf import settings


class DoubanPipeline(object):
    def __init__(self):
        host = settings["MONGODB_HOST"]
        port = settings["MONGODB_PORT"]
        dbname = settings["MONGODB_DBNAME"]
        sheetname = settings["MONGODB_SHEETNAME"]
        # Create the MongoDB connection
        client = pymongo.MongoClient(host=host, port=port)
        # Select the database
        mydb = client[dbname]
        # Collection that the scraped data goes into
        self.sheet = mydb[sheetname]

    def process_item(self, item, spider):
        data = dict(item)
        self.sheet.insert(data)
        return item
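Note that `from scrapy.conf import settings` and `Collection.insert()` are both deprecated in current Scrapy and pymongo releases. A minimal sketch of the same pipeline for newer versions, assuming the same MONGODB_* setting names, reads the settings through the crawler and uses insert_one():

import pymongo


class DoubanPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        # Read connection details from settings.py at crawl time
        return cls(
            host=crawler.settings.get("MONGODB_HOST"),
            port=crawler.settings.get("MONGODB_PORT"),
            dbname=crawler.settings.get("MONGODB_DBNAME"),
            sheetname=crawler.settings.get("MONGODB_SHEETNAME"),
        )

    def __init__(self, host, port, dbname, sheetname):
        client = pymongo.MongoClient(host=host, port=port)
        self.sheet = client[dbname][sheetname]

    def process_item(self, item, spider):
        # insert_one replaces the deprecated insert()
        self.sheet.insert_one(dict(item))
        return item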

3. Create a middleware file middlewares.py to get around anti-crawler measures

# -*- coding:utf-8 -*-

import random
import base64

from settings import USER_AGENTS
from settings import PROXIES


# Pick a random User-Agent for every request
class RandomUserAgent(object):
    def process_request(self, request, spider):
        useragent = random.choice(USER_AGENTS)
        # print useragent
        request.headers.setdefault("User-Agent", useragent)


# Route requests through a randomly chosen proxy
class RandomProxy(object):
    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        if proxy['user_passwd'] is None:
            # Proxy that needs no authentication
            request.meta['proxy'] = "http://" + proxy['ip_port']
        else:
            # Base64-encode the "user:password" pair
            base64_userpasswd = base64.b64encode(proxy['user_passwd'])
            # Pass it in the header format the proxy server expects
            request.headers['Proxy-Authorization'] = 'Basic ' + base64_userpasswd
            request.meta['proxy'] = "http://" + proxy['ip_port']
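Two details to watch: the no-auth branch only fires when `user_passwd` is None, while the commented-out proxy entries in settings.py use an empty string, so a truthiness check such as `if not proxy['user_passwd']:` is the safer test; and `base64.b64encode` accepts a plain str only under Python 2. A hedged Python 3 version of the authenticated branch might look like this:

        else:
            # Python 3: b64encode needs bytes, and the header wants str back
            creds = base64.b64encode(proxy['user_passwd'].encode('utf-8')).decode('ascii')
            request.headers['Proxy-Authorization'] = 'Basic ' + creds
            request.meta['proxy'] = "http://" + proxy['ip_port']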

4. settings.py configuration

# -*- coding: utf-8 -*-

# Scrapy settings for douban project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douban'

SPIDER_MODULES = ['douban.spiders']
NEWSPIDER_MODULE = 'douban.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#    'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douban.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'douban.middlewares.RandomUserAgent': 100,
    'douban.middlewares.RandomProxy': 200,
}

USER_AGENTS = [
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2)',
    'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
    'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
    'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
    'Mozilla/5.0 (Linux; U; Android 4.0.3; zh-cn; M032 Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
    'Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13'
]

PROXIES = [
    {"ip_port": "121.42.140.113:16816", "user_passwd": "mr_mao_hacker:sffqry9r"},
    #{"ip_port": "121.42.140.113:16816", "user_passwd": ""},
    #{"ip_port": "121.42.140.113:16816", "user_passwd": ""},
    #{"ip_port": "121.42.140.113:16816", "user_passwd": ""},
]

#LOG_FILE = "douban.log"
#LOG_LEVEL = "DEBUG"

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douban.pipelines.DoubanPipeline': 300,
}

# MongoDB host
MONGODB_HOST = "127.0.0.1"
# MongoDB port
MONGODB_PORT = 27017
# Database name
MONGODB_DBNAME = "Douban"
# Collection that holds the scraped movies
MONGODB_SHEETNAME = "doubanmovies"

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
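With these settings in place, the crawl is started from the project root with `scrapy crawl doubanmovie`. A quick way to confirm the data reached MongoDB is a short pymongo check (a sketch, assuming the host, port, database, and collection names defined above):

import pymongo

client = pymongo.MongoClient("127.0.0.1", 27017)
collection = client["Douban"]["doubanmovies"]
# 250 documents are expected after a full Top 250 crawl
print(collection.count_documents({}))
print(collection.find_one())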
