Scrapy项目 - 项目源码 - 实现腾讯网站社会招聘信息爬取的爬虫设计
1.tencentSpider.py
- # -*- coding: utf-8 -*-
- import scrapy
- from Tencent.items import TencentItem
- #创建爬虫类
class TencentspiderSpider(scrapy.Spider):
    """Spider that crawls Tencent's social-recruitment job listings.

    Walks the paginated position list at hr.tencent.com and yields one
    TencentItem per job row (title, link, category, headcount, location,
    publish date).
    """
    name = 'tencentSpider'             # spider name used by `scrapy crawl`
    allowed_domains = ['tencent.com']  # restrict the crawl to this domain
    # Pagination state: the site pages via a `start=` offset, 10 rows per page.
    offset = 0
    url = 'https://hr.tencent.com/position.php?&start='
    start_urls = [url + str(offset)]   # first page to fetch

    def parse(self, response):
        """Extract every job row from one listing page, then queue the next page."""
        # Each job posting is one <tr> with class 'odd' or 'even'.
        rows = response.xpath("//tr[@class='odd']|//tr[@class='even']")
        for row in rows:
            # BUG FIX: create a fresh item per row. The original mutated a
            # single item created outside the loop, so yielded items aliased
            # one another.
            item = TencentItem()
            # BUG FIX: the original used absolute XPaths ("//tr[...]") for the
            # category/headcount/location/date fields, which always matched
            # the FIRST row of the whole page instead of the current row.
            # All paths below are relative to `row`.  extract_first() returns
            # None instead of raising IndexError when a cell is missing.
            item['zhiwei'] = row.xpath("./td[@class='l square']/a/text()").extract_first()   # job title
            item['lianjie'] = row.xpath("./td[@class='l square']/a/@href").extract_first()   # detail link
            item['leibie'] = row.xpath("./td[2]/text()").extract_first()    # job category
            item['renshu'] = row.xpath("./td[3]/text()").extract_first()    # headcount
            item['didian'] = row.xpath("./td[4]/text()").extract_first()    # location
            item['shijian'] = row.xpath("./td[5]/text()").extract_first()   # publish date
            yield item
        # After the page is processed, request the next one (offset += 10)
        # until the last known offset is reached; parse() handles it too.
        if self.offset < 2840:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
2.items.py
- # -*- coding: utf-8 -*-
- # Define here the models for your scraped items
- #
- # See documentation in:
- # https://doc.scrapy.org/en/latest/topics/items.html
- import scrapy
class TencentItem(scrapy.Item):
    # Container for one scraped Tencent job posting.
    # Field names are pinyin:
    #   zhiwei  - job title
    #   lianjie - detail-page URL
    #   leibie  - job category
    #   renshu  - number of openings
    #   didian  - work location
    #   shijian - publish date
    zhiwei = scrapy.Field()
    lianjie = scrapy.Field()
    leibie = scrapy.Field()
    renshu = scrapy.Field()
    didian = scrapy.Field()
    shijian = scrapy.Field()
3.main.py
from scrapy import cmdline

# Convenience entry point: running this file starts the crawl, so there is
# no need to type the `scrapy crawl` command by hand each time.
cmdline.execute(['scrapy', 'crawl', 'tencentSpider'])
4.middlewares.py
- # -*- coding: utf-8 -*-
- # Define here the models for your spider middleware
- #
- # See documentation in:
- # https://doc.scrapy.org/en/latest/topics/spider-middleware.html
- from scrapy import signals
class TencentSpiderMiddleware(object):
    """Spider middleware left at the Scrapy template defaults.

    Every hook below is a pure pass-through: responses, results and start
    requests flow through unmodified.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also wires up the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None lets the response continue into the spider untouched.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every request/item produced by the spider as-is.
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        # No special handling; Scrapy's default exception behaviour applies.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests unchanged (no response is available here).
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class TencentDownloaderMiddleware(object):
    """Downloader middleware left at the Scrapy template defaults.

    None of the hooks alter requests or responses; everything passes
    straight through to and from the downloader.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also wires up the spider_opened signal.
        instance = cls()
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        return instance

    def process_request(self, request, spider):
        # Returning None keeps the request moving through the remaining
        # middlewares and on to the downloader.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic: returning None defers to other middlewares
        # and Scrapy's default exception handling.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
5.pipelines.py
- # -*- coding: utf-8 -*-
- # Define your item pipelines here
- #Tencent.json
- #class TencentPipeline(object):
- # Don't forget to add your pipeline to the ITEM_PIPELINES setting
- # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
- import json
- from openpyxl import Workbook
class TencentPipeline(object):
    """Item pipeline that writes scraped job postings to tencentSpider.xlsx."""

    def __init__(self):
        # BUG FIX: the workbook/worksheet were class-level attributes in the
        # original, created once at import time and shared by every instance.
        # Build them per pipeline instance instead.
        self.wb = Workbook()
        self.ws = self.wb.active
        # Header row: job title, link, category, headcount, location, date.
        self.ws.append(['职位', '链接', '类型', '人数', '地点', '时间'])

    def process_item(self, item, spider):
        """Append one job posting as a worksheet row and pass the item on."""
        self.ws.append([
            item['zhiwei'], item['lianjie'], item['leibie'],
            item['renshu'], item['didian'], item['shijian'],
        ])
        return item

    def close_spider(self, spider):
        # Save once when the crawl finishes.  The original re-saved the whole
        # workbook after every single item, which is O(n^2) disk work.
        self.wb.save('tencentSpider.xlsx')
6.settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for Tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Tencent'

SPIDER_MODULES = ['Tencent.spiders']
NEWSPIDER_MODULE = 'Tencent.spiders'

# A desktop-Chrome user agent is used instead of the default Scrapy UA,
# presumably so the recruitment site serves the normal browser page.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'

# Obey robots.txt rules
# NOTE(review): deliberately left commented out, so robots.txt is NOT obeyed
# for this crawl (the project template normally sets this to True).
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Tencent.middlewares.TencentSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Tencent.middlewares.TencentDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# The only enabled pipeline writes items to an Excel workbook.
ITEM_PIPELINES = {
   'Tencent.pipelines.TencentPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
Scrapy项目 - 项目源码 - 实现腾讯网站社会招聘信息爬取的爬虫设计的更多相关文章
- Scrapy项目 - 数据简析 - 实现腾讯网站社会招聘信息爬取的爬虫设计
一.数据分析截图 本例实验,使用Weka 3.7对腾讯招聘官网中网页上所罗列的招聘信息,如:其中的职位名称.链接.职位类别.人数.地点和发布时间等信息进行数据分析,详见如下图: 图1-1 Weka ...
- Scrapy项目 - 实现腾讯网站社会招聘信息爬取的爬虫设计
通过使用Scrapy框架，进行数据挖掘和对web站点页面提取结构化数据，掌握如何使用Twisted异步网络框架来处理网络通讯的问题，可以加快我们的下载速度，也可深入接触各种中间件接口，灵活地完成各种需求 ...
- Scrapy项目 - 源码工程 - 实现豆瓣 Top250 电影信息爬取的爬虫设计
一.项目目录结构 spiders文件夹内包含doubanSpider.py文件,对于项目的构建以及结构逻辑,详见环境搭建篇. 二.项目源码 1.doubanSpider.py # -*- coding ...
- Scrapy项目 - 实现斗鱼直播网站信息爬取的爬虫设计
要求编写的程序可爬取斗鱼直播网站上的直播信息,如:房间数,直播类别和人气等.熟悉掌握基本的网页和url分析,同时能灵活使用Xmind工具对Python爬虫程序(网络爬虫)流程图进行分析. 一.项目 ...
- Scrapy项目 - 实现豆瓣 Top250 电影信息爬取的爬虫设计
通过使用Scrapy框架，掌握如何使用Twisted异步网络框架来处理网络通讯的问题，进行数据挖掘和对web站点页面提取结构化数据，可以加快我们的下载速度，也可深入接触各种中间件接口，灵活地完成各种需求 ...
- Scrapy项目 - 数据简析 - 实现豆瓣 Top250 电影信息爬取的爬虫设计
一.数据分析截图(weka数据分析截图 ) 本例实验,使用Weka 3.7对豆瓣电影网页上所罗列的上映电影信息,如:标题.主要信息(年份.国家.类型)和评分等的信息进行数据分析,Weka 3.7数据分 ...
- Scrapy项目 - 数据简析 - 实现斗鱼直播网站信息爬取的爬虫设计
一.数据分析截图(weka数据分析截图 2-3个图,作业文字描述) 本次将所爬取的数据信息,如:房间数,直播类别和人气,导入Weka 3.7工具进行数据分析.有关本次的数据分析详情详见下图所示: ...
- Scrapy项目 - 实现百度贴吧帖子主题及图片爬取的爬虫设计
要求编写的程序可获取任一贴吧页面中的帖子链接,并爬取贴子中用户发表的图片,在此过程中使用user agent 伪装和轮换,解决爬虫ip被目标网站封禁的问题.熟悉掌握基本的网页和url分析,同时能灵活使 ...
- Scrapy案例02-腾讯招聘信息爬取
目录 1. 目标 2. 网站结构分析 3. 编写爬虫程序 3.1. 配置需要爬取的目标变量 3.2. 写爬虫文件scrapy 3.3. 编写yield需要的管道文件 3.4. setting中配置请求 ...
随机推荐
- python 38 线程队列与协程
目录 1. 线程队列 1.1 先进先出(FIFO) 1.2 后进先出(LIFO)堆栈 1.3 优先级队列 2. 事件event 3. 协程 4. Greenlet 模块 5. Gevent模块 1. ...
- [Python] Django框架入门3——深入视图
说明: 本文主要深入了解视图(views.py),涉及路由配置.定义视图.Request对象.Response对象.状态保持等. 一.路由配置 1.配置位置(settings.py 的 ROOT_UR ...
- .NET CORE下最快比较两个文件内容是否相同的方法 - 续
.NET CORE下最快比较两个文件内容是否相同的方法 - 续 在上一篇博文中, 我使用了几种方法试图找到哪个是.NET CORE下最快比较两个文件的方法.文章发布后,引起了很多博友的讨论, 在此我对 ...
- .netcore 分布式事务CAP2.6之控制台使用
上一编讲了cap2.6的快速入门,这次我们来讲讲在控制台中如何使用cap2.6.因为cap2.6的内存模式目前已经可以使用了,相关组件已经更新,所以这次我们以简单的内存模式为例. 1:创建项目 创建一 ...
- JavaScript 小游戏 贪吃蛇
贪吃蛇 代码: <!DOCTYPE html><html><head> <meta charset="UTF-8"> <met ...
- VS Code 配置 Python 开发环境
1.终端运行 Python2.安装 Python 插件3.查看.安装外部库4.代码补全工具5.代码检查工具5.1.pylint5.2.flake8 和 yapf 本文基于 VS Code 1.36.1 ...
- 深入理解static关键字
class A{ public int i = 10; public void show(){ System.out.printf("%d",i); } } class M{ pu ...
- PAT L3-017. 森森快递
L3-017. 森森快递 时间限制 400 ms 内存限制 65536 kB 代码长度限制 8000 B 判题程序 Standard 作者 俞勇(上海交通大学) 森森开了一家快递公司,叫森森快递.因为 ...
- Python学习之旅:使用Python实现Linux中的ls命令
一.写在前面 前几天在微信上看到这样一篇文章,链接为:https://mp.weixin.qq.com/s/rl6Sgv3uk_IpoFAx6cWa8w,在这篇文章中,有这样一段话,吸引了我的注意: ...
- R:ggplot2数据可视化——进阶(1)
,分为三个部分,此篇为Part1,推荐学习一些基础知识后阅读~ Part 1: Introduction to ggplot2, 覆盖构建简单图表并进行修饰的基础知识 Part 2: Customiz ...