# -*- coding: utf-8 -*-

# Scrapy settings for JobBole project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'JobBole'

SPIDER_MODULES = ['JobBole.spiders']
NEWSPIDER_MODULE = 'JobBole.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'JobBole (+http://www.yourdomain.com)'

# Whether to obey the robots.txt protocol; obeyed by default, set to False to ignore it
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'JobBole.middlewares.JobboleSpiderMiddleware': 543,
#}

# Image download configuration: the item field that holds image URLs, and the
# directory where downloaded images are stored
import os
IMAGES_URLS_FIELD = 'img_url'
base_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(base_dir, 'images')

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'JobBole.middlewares.MyCustomDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'JobBole.pipelines.JobboleImagerPipeline': 1,
    'JobBole.pipelines.SqlSave': 2,
}

# Basic database configuration (a custom, project-level setting)
SQL_DBA = {
    'host': 'localhost',
    'db': 'jobole',
    'user': 'root',
    'password': 'password',
    'charset': 'utf8',  # pymysql expects 'utf8' (or 'utf8mb4'), not 'utf-8'
    'use_unicode': True,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
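
The two image settings above plug into Scrapy's built-in ImagesPipeline: IMAGES_URLS_FIELD names the item field that holds the image URLs (it must be a list, not a single string, or the pipeline will iterate over the characters), and IMAGES_STORE is where the downloaded files land. A minimal sketch of what the JobboleImagerPipeline registered in ITEM_PIPELINES might look like; the item_completed override and the 'img_path' field are assumptions for illustration, not the project's actual code:

from scrapy.pipelines.images import ImagesPipeline


class JobboleImagerPipeline(ImagesPipeline):
    """A sketch: record the stored file path back onto the item."""

    def item_completed(self, results, item, info):
        # results is a list of (success, detail) tuples; detail['path'] is the
        # file path relative to IMAGES_STORE for each downloaded image.
        paths = [detail['path'] for ok, detail in results if ok]
        if paths:
            item['img_path'] = paths[0]  # 'img_path' is a hypothetical field
        return item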
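
In ITEM_PIPELINES, the integers are priorities: lower values run first, so each item passes through JobboleImagerPipeline (1) before SqlSave (2). SQL_DBA is not a built-in Scrapy setting but a project-level one, so the pipeline has to read it from crawler.settings itself. A minimal sketch of how SqlSave might consume it with pymysql; the table and column names are hypothetical, since the post does not show the project's schema:

import pymysql


class SqlSave(object):
    """A sketch: persist items to MySQL using the custom SQL_DBA setting."""

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings.get('SQL_DBA'))

    def __init__(self, dba):
        self.dba = dba

    def open_spider(self, spider):
        # pymysql expects charset 'utf8' (or 'utf8mb4'), matching SQL_DBA above
        self.conn = pymysql.connect(**self.dba)
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # hypothetical table and columns, purely for illustration
        sql = 'INSERT INTO article (title, url) VALUES (%s, %s)'
        self.cursor.execute(sql, (item.get('title'), item.get('url')))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()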

  
