1. Save log information

# Log settings: level, stdout capture, encoding, and the file log messages are saved to
import os

LOG_LEVEL = "INFO"
LOG_STDOUT = True
LOG_ENCODING = 'utf-8'
# path helper: os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
LOG_FILE = os.path.dirname(__file__) + "/SHANGSHIYAOPINGMULU_error.log"
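
Once these are set, log output lands in LOG_FILE. A minimal sketch of how messages reach it from inside a spider (the spider name and URL below are made up):

import scrapy

class DemoSpider(scrapy.Spider):
    name = "demo"
    start_urls = ["https://example.com"]

    def parse(self, response):
        # written to LOG_FILE at the configured LOG_LEVEL
        self.logger.info("parsed %s", response.url)
        # LOG_STDOUT = True redirects stdout, so plain print() is captured too
        print("this line also lands in the log")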

2. Disable redirects

REDIRECT_ENABLED = False
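
REDIRECT_ENABLED = False switches RedirectMiddleware off for the whole project. To keep redirects on globally but skip them for individual requests, Scrapy's standard dont_redirect meta key does the job; a minimal sketch (the helper function name is made up):

import scrapy

def no_redirect_request(url, callback):
    # opt out of redirects for this one request; 301/302 responses are then
    # delivered to the callback instead of being treated as errors
    return scrapy.Request(
        url,
        meta={'dont_redirect': True, 'handle_httpstatus_list': [301, 302]},
        callback=callback,
    )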

3. Set a download delay

import random

# Evaluated once, when the settings are loaded: some value in [0, 2)
DOWNLOAD_DELAY = random.random() + random.random()
# Scrapy then additionally randomizes the wait before each request
RANDOMIZE_DOWNLOAD_DELAY = True
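
For clarity: with RANDOMIZE_DOWNLOAD_DELAY enabled, Scrapy waits a uniformly random 0.5x to 1.5x multiple of DOWNLOAD_DELAY between requests to the same site. Plain arithmetic to illustrate, not Scrapy API:

import random

DOWNLOAD_DELAY = random.random() + random.random()

def effective_delay(base=DOWNLOAD_DELAY):
    # the per-request wait Scrapy actually applies
    return random.uniform(0.5 * base, 1.5 * base)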

4. Set the USER_AGENT

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
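
A single fixed USER_AGENT is easy for a site to fingerprint. If you need rotation, a small downloader middleware can pick a User-Agent per request; a minimal sketch (the class name and UA list are my own, not part of Scrapy):

import random

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
]

class RandomUserAgentMiddleware(object):
    def process_request(self, request, spider):
        # pick a fresh User-Agent for every outgoing request
        request.headers['User-Agent'] = random.choice(USER_AGENTS)

# in settings.py (the module path is a placeholder for your project):
# DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.RandomUserAgentMiddleware': 400}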

5. Run all the spiders in the project

Step 1. Create a commands folder at the same level as the spiders directory

mkdir commands

Step 2. Enter the commands folder

cd commands

Step 3. Create an __init__.py file

touch __init__.py

Step 4. Create a crawlall.py file

touch crawlall.py

Step 5. Open crawlall.py and write the command code

vim crawlall.py


from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.conf import arglist_to_dict


class Command(ScrapyCommand):
    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                          help="set spider argument (may be repeated)")
        parser.add_option("-o", "--output", metavar="FILE",
                          help="dump scraped items into FILE (use - for stdout)")
        parser.add_option("-t", "--output-format", metavar="FORMAT",
                          help="format to use for dumping items with -o")

    def process_options(self, args, opts):
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)

    def run(self, args, opts):
        # schedule every spider named on the command line, or all spiders in
        # the project if none were given
        spider_loader = self.crawler_process.spider_loader
        for spidername in args or spider_loader.list():
            print("*********crawlall %s************" % spidername)
            self.crawler_process.crawl(spidername, **opts.spargs)
        self.crawler_process.start()


Step 6. Register the commands module

COMMANDS_MODULE = 'spider.commands'   # replace 'spider' with your own project name

cmdline.execute(['scrapy', 'crawlall'])   # launch the project
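
The launch line above needs an import and must run from inside the project directory; a runnable sketch, assuming it is saved at the project root (run.py is an assumed file name, not something Scrapy creates):

# run.py - equivalent to typing "scrapy crawlall" inside the project directory
from scrapy import cmdline

cmdline.execute(['scrapy', 'crawlall'])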

6. Set the status codes that trigger a retry

RETRY_HTTP_CODES = [500, 520]
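
Note that RETRY_HTTP_CODES only applies while RetryMiddleware is enabled. A sketch of the related settings (the defaults noted are Scrapy's):

RETRY_ENABLED = True           # default: True; must stay on for RETRY_HTTP_CODES to matter
RETRY_TIMES = 2                # default: 2 retries on top of the first attempt
RETRY_HTTP_CODES = [500, 520]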

7. Configure redis (scrapy-redis)

# Redis connection info
REDIS_HOST = "192.168.1.235"
REDIS_PORT = 6379
REDIS_PARAMS = {
    "password": "KangCe@0608",
}

# 1 (required). Use the scrapy_redis dupefilter so request deduplication
# happens in the redis database
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# 2 (required). Use the scrapy_redis scheduler so requests are distributed via redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# 3 (required). Keep the scrapy-redis queues in redis instead of clearing them,
# which allows a crawl to be paused and later resumed
SCHEDULER_PERSIST = True

# 4 (required). RedisPipeline writes each item into the redis list
# "<spider.name>:items" for downstream distributed processing.
# scrapy-redis already implements this; no extra code is needed.
ITEM_PIPELINES = {
    # 'AQI.pipelines.AqiJsonPipeline': 200,
    # 'AQI.pipelines.AqiCSVPipeline': 300,
    # 'AQI.pipelines.AqiRedisPipeline': 400,
    # 'AQI.pipelines.AqiMongoPipeline': 500,
    'scrapy_redis.pipelines.RedisPipeline': 100
}
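
With those pieces in place, a spider only needs to subclass RedisSpider and name the redis list it reads start URLs from. A minimal sketch (the spider name and key below are hypothetical; requires the scrapy-redis package):

from scrapy_redis.spiders import RedisSpider

class DemoRedisSpider(RedisSpider):
    name = "demo_redis"
    # the spider idles until URLs are pushed onto this redis list
    redis_key = "demo_redis:start_urls"

    def parse(self, response):
        yield {"url": response.url, "title": response.css("title::text").get()}

Seed the crawl from any machine with redis-cli lpush demo_redis:start_urls http://example.com; every worker running this spider consumes from the same queue.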

8. settings.py configuration reference, annotated

# The name of the Scrapy project. It is used to build the default User-Agent
# and for logging, and is set automatically by the startproject command.
BOT_NAME = 'demo1'

# List of modules where Scrapy looks for spiders. Default: ['xxx.spiders']
SPIDER_MODULES = ['demo1.spiders']

# Module where spiders created by the genspider command go. Default: 'xxx.spiders'
NEWSPIDER_MODULE = 'demo1.spiders'

# Default User-Agent used while crawling, unless overridden
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'

# Log settings: level, stdout capture, encoding, output file
LOG_LEVEL = "INFO"
LOG_STDOUT = True
LOG_ENCODING = 'utf-8'
# path helper: os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
LOG_FILE = "SHANGSHIYAOPINGMULU_error.log"

# Disable redirects
REDIRECT_ENABLED = False

# Download delay
import random
DOWNLOAD_DELAY = random.random() + random.random()
RANDOMIZE_DOWNLOAD_DELAY = True

# If enabled, Scrapy honours the site's robots.txt policy
ROBOTSTXT_OBEY = True

# Status codes that trigger a retry
RETRY_HTTP_CODES = [500, 520]

# Maximum number of concurrent requests performed by the downloader. Default: 16
CONCURRENT_REQUESTS = 32

# Maximum crawl depth; the current depth can be read from a request's meta.
# 0 means no depth limit
DEPTH_LIMIT = 3

# Crawl order: DEPTH_PRIORITY = 0 is depth-first / LIFO (the default);
# DEPTH_PRIORITY = 1 is breadth-first / FIFO. Use one of the two blocks below.
# Last in, first out - depth-first:
DEPTH_PRIORITY = 0
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
# First in, first out - breadth-first:
DEPTH_PRIORITY = 1
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'

# Scheduler queue
# from scrapy.core.scheduler import Scheduler
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# Custom URL dupefilter
# DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'

# Configure a delay for requests to the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Time the downloader waits before fetching the next page from the same site.
# Use it to limit crawl speed and reduce load on the server; fractions such as
# 0.25 (seconds) are allowed.
DOWNLOAD_DELAY = 3

# The download delay setting honours only one of the two limits below.
# Maximum number of concurrent requests per domain:
CONCURRENT_REQUESTS_PER_DOMAIN = 16
# Maximum number of concurrent requests per IP. If non-zero,
# CONCURRENT_REQUESTS_PER_DOMAIN is ignored and the limit applies per IP
# rather than per website. It also affects DOWNLOAD_DELAY: with a non-zero
# value the delay is enforced per IP instead of per site.
CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable the Telnet console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'demo1.middlewares.Demo1SpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'demo1.middlewares.MyCustomDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
EXTENSIONS = {
    'scrapy.extensions.telnet.TelnetConsole': None,
}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'demo1.pipelines.Demo1Pipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# Initial download delay
AUTOTHROTTLE_START_DELAY = 5
# Maximum download delay to allow in case of high latency
AUTOTHROTTLE_MAX_DELAY = 60
# Average number of requests Scrapy should send in parallel to each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = False

# Disable retries
RETRY_ENABLED = False
# Download timeout
DOWNLOAD_TIMEOUT = 10
# Number of retries per request (the setting is RETRY_TIMES; RETRY_ITEMS, as it
# appeared here originally, is not a Scrapy setting)
RETRY_TIMES = 5

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# Whether to enable the cache
HTTPCACHE_ENABLED = True
# Cache policy: cache every request; the next identical request is served
# straight from the cache
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# Cache policy: cache according to HTTP response headers such as
# Cache-Control and Last-Modified
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# Cache expiration time in seconds (0 = cached responses never expire)
HTTPCACHE_EXPIRATION_SECS = 0
# Directory the cache is stored in
HTTPCACHE_DIR = 'httpcache'
# HTTP status codes that are never cached
HTTPCACHE_IGNORE_HTTP_CODES = []
# Cache storage backend
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
"""
19. Proxies - configured through environment variables
    from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

    Option 1: use the default middleware, which reads os.environ:
    {
        http_proxy:  http://root:woshiniba@192.168.11.11:9999/
        https_proxy: http://192.168.11.11:9999/
    }

    Option 2: use a custom downloader middleware

    import base64
    import random
    import six

    def to_bytes(text, encoding=None, errors='strict'):
        if isinstance(text, bytes):
            return text
        if not isinstance(text, six.string_types):
            raise TypeError('to_bytes must receive a unicode, str or bytes '
                            'object, got %s' % type(text).__name__)
        if encoding is None:
            encoding = 'utf-8'
        return text.encode(encoding, errors)

    class ProxyMiddleware(object):
        def process_request(self, request, spider):
            PROXIES = [
                {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
            ]
            proxy = random.choice(PROXIES)
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                # base64.b64encode replaces the deprecated base64.encodestring
                encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass'])).decode('ascii')
                request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
            else:
                print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

    DOWNLOADER_MIDDLEWARES = {
        'step8_king.middlewares.ProxyMiddleware': 500,
    }
"""
"""
20. HTTPS access
    There are two cases when crawling HTTPS sites:
    1. The site uses a certificate from a trusted CA (supported by default)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"

    2. The site uses a custom (e.g. self-signed) certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        # https.py
        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,   # a PKey object
                    certificate=v2,  # an X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )

    Related classes:
        scrapy.core.downloader.handlers.http.HttpDownloadHandler
        scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
        scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
    Related settings:
        DOWNLOADER_HTTPCLIENTFACTORY
        DOWNLOADER_CLIENTCONTEXTFACTORY
"""
"""
21. Spider middleware

    class SpiderMiddleware(object):

        def process_spider_input(self, response, spider):
            '''
            Called for each downloaded response before it is handed to parse()
            :param response:
            :param spider:
            :return:
            '''
            pass

        def process_spider_output(self, response, result, spider):
            '''
            Called with the results the spider returns after processing a response
            :param response:
            :param result:
            :param spider:
            :return: must return an iterable of Request and/or Item objects
            '''
            return result

        def process_spider_exception(self, response, exception, spider):
            '''
            Called when the spider raises an exception
            :param response:
            :param exception:
            :param spider:
            :return: None to let the remaining middlewares handle the exception,
                     or an iterable of Response or Item objects to hand to the
                     scheduler or the pipelines
            '''
            return None

        def process_start_requests(self, start_requests, spider):
            '''
            Called with the start requests when the spider is opened
            :param start_requests:
            :param spider:
            :return: an iterable of Request objects
            '''
            return start_requests

    Built-in spider middlewares:
        'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
        'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
        'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
        'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
        'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""
# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}
"""
22. Downloader middleware

    class DownMiddleware1(object):

        def process_request(self, request, spider):
            '''
            Called for every request before it is downloaded
            :param request:
            :param spider:
            :return:
                None: continue with the remaining middlewares and download
                Response object: stop running process_request and start running process_response
                Request object: stop the middleware chain and send the request back to the scheduler
                raise IgnoreRequest: stop process_request and start running process_exception
            '''
            pass

        def process_response(self, request, response, spider):
            '''
            Called with the response returned by the downloader
            :param request:
            :param response:
            :param spider:
            :return:
                Response object: handed to the next middleware's process_response
                Request object: stop the chain; the request is rescheduled for download
                raise IgnoreRequest: Request.errback is called
            '''
            print('response1')
            return response

        def process_exception(self, request, exception, spider):
            '''
            Called when a download handler or a process_request() of a
            downloader middleware raises an exception
            :param request:
            :param exception:
            :param spider:
            :return:
                None: let the remaining middlewares handle the exception
                Response object: stop running the remaining process_exception methods
                Request object: stop the chain; the request is rescheduled for download
            '''
            return None

    Default downloader middlewares:
    {
        'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
        'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
        'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
        'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
        'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
        'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
        'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
        'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
        'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
        'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
        'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
        'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
        'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
        'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
    }
"""
# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'step8_king.middlewares.DownMiddleware1': 100,
#     'step8_king.middlewares.DownMiddleware2': 500,
# }
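
One last point worth knowing: every project-wide setting above can be overridden for a single spider through its custom_settings attribute. A minimal sketch (the spider name and values are illustrative):

import scrapy

class SlowSpider(scrapy.Spider):
    name = "slow"
    start_urls = ["https://example.com"]

    # takes precedence over settings.py for this spider only
    custom_settings = {
        'DOWNLOAD_DELAY': 5,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 2,
        'RETRY_HTTP_CODES': [500, 502, 503],
    }

    def parse(self, response):
        pass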
