17.splash_case06_ScrapySplashTest-master
taobao.py
# -*- coding: utf-8 -*-
from scrapy import Spider
from urllib.parse import quote
from scrapysplashtest.items import ProductItem
from scrapy_splash import SplashRequest

# Lua script executed by Splash: load the search page, then jump to the
# requested page number by filling in the pager input and clicking submit.
script = """
function main(splash, args)
  splash.images_enabled = false
  assert(splash:go(args.url))
  assert(splash:wait(args.wait))
  local js = string.format("document.querySelector('#mainsrp-pager div.form > input').value=%d;document.querySelector('#mainsrp-pager div.form > span.btn.J_Submit').click()", args.page)
  splash:evaljs(js)
  assert(splash:wait(args.wait))
  return splash:html()
end
"""
class TaobaoSpider(Spider):
    name = 'taobao'
    allowed_domains = ['www.taobao.com']
    base_url = 'https://s.taobao.com/search?q='

    def start_requests(self):
        # One Splash request per keyword/page combination; the Lua script
        # receives the page number and wait time through ``args``.
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = self.base_url + quote(keyword)
                yield SplashRequest(url, callback=self.parse, endpoint='execute',
                                    args={'lua_source': script, 'page': page, 'wait': 7})

    def parse(self, response):
        products = response.xpath(
            '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]//div[contains(@class, "item")]')
        for product in products:
            item = ProductItem()
            item['price'] = ''.join(
                product.xpath('.//div[contains(@class, "price")]//text()').extract()).strip()
            item['title'] = ''.join(
                product.xpath('.//div[contains(@class, "title")]//text()').extract()).strip()
            item['shop'] = ''.join(
                product.xpath('.//div[contains(@class, "shop")]//text()').extract()).strip()
            item['image'] = ''.join(product.xpath(
                './/div[@class="pic"]//img[contains(@class, "img")]/@data-src').extract()).strip()
            item['deal'] = product.xpath(
                './/div[contains(@class, "deal-cnt")]//text()').extract_first()
            item['location'] = product.xpath(
                './/div[contains(@class, "location")]//text()').extract_first()
            yield item
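The Lua script can also be tested against a running Splash instance directly, through Splash's /execute HTTP endpoint, before wiring it into Scrapy. A minimal sketch, assuming Splash is listening on http://localhost:8050 and the requests library is installed (the keyword and page values here are placeholders):

import requests

# POST the Lua script to Splash's /execute endpoint; the extra JSON keys
# (url, wait, page) are exposed to the script as args / splash.args.
resp = requests.post('http://localhost:8050/execute', json={
    'lua_source': script,
    'url': 'https://s.taobao.com/search?q=iPad',
    'wait': 7,
    'page': 2,
})
print(resp.text[:500])  # first 500 characters of the rendered HTML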
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

from scrapy import Item, Field

class ProductItem(Item):
    # MongoDB collection name, read by MongoPipeline below
    collection = 'products'

    image = Field()
    price = Field()
    deal = Field()
    title = Field()
    shop = Field()
    location = Field()
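A ProductItem behaves like a dictionary restricted to the declared fields, while the extra collection attribute stays a plain class attribute naming the target MongoDB collection. A quick sketch of that behaviour, with made-up sample values:

from scrapysplashtest.items import ProductItem

item = ProductItem()
item['title'] = 'iPad mini'  # sample value, not real scraped data
item['price'] = '2500.00'
print(dict(item))        # {'title': 'iPad mini', 'price': '2500.00'}
print(item.collection)   # 'products' -- used as the collection name in the pipeline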
middlewares.py
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

class ScrapysplashtestSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
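This middleware is the stock template generated by scrapy startproject, left unmodified; since it is not registered in SPIDER_MIDDLEWARES below, it never runs. If you did want to activate it, you would add it next to the scrapy-splash entry, for example (the priority 543 is the template's usual default, used here purely for illustration):

SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
    'scrapysplashtest.middlewares.ScrapysplashtestSpiderMiddleware': 543,
}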
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymongo

class MongoPipeline(object):
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Pull connection settings from settings.py (MONGO_URI / MONGO_DB).
        return cls(mongo_uri=crawler.settings.get('MONGO_URI'),
                   mongo_db=crawler.settings.get('MONGO_DB'))

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # insert_one() replaces the insert() method deprecated in pymongo 3.x
        self.db[item.collection].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
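After a crawl, the stored data can be inspected from a Python shell. A minimal sketch, assuming MongoDB is running locally with the MONGO_URI and MONGO_DB values from settings.py:

import pymongo

client = pymongo.MongoClient('localhost')
db = client['taobao']
print(db['products'].count_documents({}))       # number of stored products
print(db['products'].find_one({}, {'_id': 0}))  # peek at one stored item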
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for scrapysplashtest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapysplashtest'
SPIDER_MODULES = ['scrapysplashtest.spiders']
NEWSPIDER_MODULE = 'scrapysplashtest.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'scrapysplashtest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'scrapysplashtest.pipelines.MongoPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Project-specific settings: search keywords, crawl depth, Splash and MongoDB
KEYWORDS = ['iPad']
MAX_PAGE = 100

SPLASH_URL = 'http://localhost:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'

MONGO_URI = 'localhost'
MONGO_DB = 'taobao'
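With these settings in place, a Splash instance must be reachable at SPLASH_URL before the spider starts. A typical way to bring one up (assuming Docker is installed) and then launch the crawl:

docker run -p 8050:8050 scrapinghub/splash
scrapy crawl taobao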