Install SQLite first; Scrapy needs Python's sqlite3 module, which is only built if the sqlite headers are present when Python is compiled:

yum install sqlite-devel

Install Python 3.5

Download the source package and compile and install it yourself:

./configure

make

make install
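To confirm the build succeeded and picked up the sqlite headers installed above (assuming make install put python3 on your PATH, e.g. under /usr/local/bin):

python3 -V

python3 -c "import sqlite3; print(sqlite3.sqlite_version)"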

The source build comes with pip; upgrade it to the latest version:

pip3 install --upgrade pip

Install the MySQL module for Python 3:

pip3 install pymysql
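A minimal sketch to check that the module works; the connection parameters here (host, user, password, database) are placeholders, not values from this setup:

import pymysql

# Connect to a local MySQL server; credentials are hypothetical.
conn = pymysql.connect(host='localhost', user='root', password='secret', db='test')
try:
    with conn.cursor() as cur:
        cur.execute('SELECT VERSION()')
        print(cur.fetchone())
finally:
    conn.close()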

Install Twisted, the event-driven networking framework that Scrapy is built on:

wget https://pypi.python.org/packages/6b/23/8dbe86fc83215015e221fbd861a545c6ec5c9e9cd7514af114d1f64084ab/Twisted-16.4.1.tar.bz2#md5=c6d09bdd681f538369659111f079c29d

Unpack it:

tar -jxvf Twisted-16.4.1.tar.bz2

Enter the directory:

cd Twisted-16.4.1

Install it:

python3 setup.py install
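A quick check that Twisted landed in the Python 3 site-packages:

python3 -c "import twisted; print(twisted.version)"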

Install Scrapy:

pip3 install scrapy
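If the install worked, Scrapy can report its own version (with -v it also lists the versions of Twisted and its other dependencies):

scrapy version -v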

Install Redis:

yum install redis
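Start the service and make sure it answers; the systemctl command assumes a systemd-based system such as CentOS 7 (on CentOS 6 use service redis start instead):

systemctl start redis

redis-cli ping    # should answer PONG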

Install scrapy-redis:

git clone https://github.com/rolando/scrapy-redis.git

cd scrapy-redis/

python3 setup.py install
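To make a Scrapy project actually use scrapy-redis, point the scheduler and duplicate filter at it in the project's settings.py. The setting names below are scrapy-redis's own; the Redis address is an assumption for a default local install:

# settings.py
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Keep the request queue in redis between runs so a crawl can be paused and resumed.
SCHEDULER_PERSIST = True
# Where the shared redis server lives (assumed local default port).
REDIS_URL = 'redis://127.0.0.1:6379'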

The following is a temporary fix, taken from GitHub, for a bug caused by the bytes/str string difference between Python 2 and Python 3.

# util.py
import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s


# spider.py
import six

from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider

from . import connection
from .utils import bytes_to_str


# Default batch size matches default concurrent requests setting.
DEFAULT_START_URLS_BATCH_SIZE = 16
DEFAULT_START_URLS_KEY = '%(name)s:start_urls'


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    # Per spider redis key, default to DEFAULT_KEY.
    redis_key = None
    # Fetch this amount of start urls when idle. Default to DEFAULT_BATCH_SIZE.
    redis_batch_size = None
    redis_encoding = 'utf-8'
    # Redis client instance.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', DEFAULT_START_URLS_KEY,
            )

        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE', DEFAULT_START_URLS_BATCH_SIZE,
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s)", self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET')
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        # By default, data is an URL.
        if not isinstance(data, six.string_types):
            # XXX: Shall we log and continue?
            self.logger.error("Wrong type for data: %s" % type(data))
            url = bytes_to_str(data, self.redis_encoding)
        else:
            url = data

        # FIXME: This is a naive guard against using a wrong redis_key where
        # data are not string URLs.
        if '://' not in url:
            # XXX: Shall this be an exception?
            self.logger.error("Missing scheme in URL: '%s'", url)

        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle."""

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
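For reference, a minimal spider built on the patched classes above might look like this; the spider name and parse logic are illustrative only:

from scrapy_redis.spiders import RedisSpider

class MySpider(RedisSpider):
    """Reads start URLs from the 'myspider:start_urls' redis key."""
    name = 'myspider'

    def parse(self, response):
        # Illustrative: record each page's URL and title.
        yield {'url': response.url,
               'title': response.css('title::text').extract_first()}

Feed it URLs from another shell by pushing to the key it watches:

redis-cli lpush myspider:start_urls http://example.com/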

  
