Crawling with a process pool and storing the results in MongoDB
Set up a process pool to crawl Lagou (lagou.com).
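In the script, page_index requests Lagou's mobile search API (https://m.lagou.com/search.json) and parse_page_index walks a fixed nesting in the JSON it returns. As orientation, here is a minimal sketch of that nesting — the values are made up, and only the keys the parsing code actually reads are shown; real responses carry many more fields:

# Rough shape of one search.json response, reduced to the keys used below
sample_response = {
    "content": {
        "data": {
            "page": {
                "result": [
                    {
                        "positionId": 1234567,          # made-up value
                        "positionName": "数据挖掘工程师",
                        "city": "北京",
                        "createTime": "2018-11-19 15:19",
                        "salary": "15k-30k",
                        "companyId": 10001,
                        "companyFullName": "示例公司",
                    },
                    # ... one dict per position, up to pageSize entries
                ]
            }
        }
    }
}

item = sample_response["content"]["data"]["page"]["result"][0]
print(item["positionName"], item["salary"])   # 数据挖掘工程师 15k-30k

With that shape in mind, the full script: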
# coding: utf-8
import json
import pymongo
import pandas as pd
import requests
from lxml import etree
import time
from multiprocessing import Pool

# Set up the MongoDB connection
client = pymongo.MongoClient('localhost')
db = client['lagou']
# Position keyword to search for
POSITION_NAME = '数据挖掘'
# Total number of pages to crawl (value assumed here; Lagou shows at most 334 pages)
PAGE_SUM = 30
# Number of positions returned per page (15 is an assumed value for the mobile API)
PAGE_SIZE = 15
# Name of the MongoDB collection to store results in
DATA_NAME = "DataMiningPosition" base_url = 'https://m.lagou.com/search.json?city=%E5%85%A8%E5%9B%BD&positionName={positionName}' \
'&pageNo={pageNo}&pageSize={pageSize}' def page_index(pageno):
headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
        # Avoid sending cookies if possible; here the API returns data fine without one
# "Cookie": "user_trace_token=20181119151914-03711263-38a2-4d81-bd81-5f480d930039; _ga=GA1.2.605262108.1542611954; _gid=GA1.2.249787972.1542611954; LGSID=20181119151916-6c3da9fa-ebcb-11e8-8958-5254005c3644; PRE_UTM=; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DOnHWjpEfiW4_pVm7hX8NYOFm0iJ7bz1ZJJlaKPPnmMzLE-6ypKNo0f19ABO5bjW4%26wd%3D%26eqid%3D8f61629100016e18000000065bf263e7; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fgongsi%2F147.html; LGUID=20181119151916-6c3dabf3-ebcb-11e8-8958-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; JSESSIONID=ABAAABAAAGCABCC2D851CA25D1CFCD2B28DCDD6E00A2C7E; _ga=GA1.3.605262108.1542611954; X_HTTP_TOKEN=a0cc1a4beb8a41f57f144bc0bfd77bd7; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221672adb3834203-08b3706084b44a-3961430f-1327104-1672adb3835428%22%2C%22%24device_id%22%3A%221672adb3834203-08b3706084b44a-3961430f-1327104-1672adb3835428%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542611954,1542612053,1542612277,1542612493; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542613115; LGRID=20181119153837-20bafb1a-ebce-11e8-8958-5254005c3644",
"Host": "m.lagou.com",
"Proxy-Connection": "keep-alive",
"Referer": "http://m.lagou.com/search.html",
"X-Requested-With": "XMLHttpRequest",
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
}
url = base_url.format(positionName=POSITION_NAME, pageNo=pageno, pageSize=PAGE_SIZE)
response = requests.get(url, headers=headers)
html = response.text
content = json.loads(html)
print(content)
if content.get("content"):
return content
else:
        time.sleep(3)  # back off briefly before retrying (delay value assumed)
        return page_index(pageno)


def parse_page_index(content):
    for i in range(PAGE_SIZE):
try:
item = content['content']['data']['page']['result'][i]
#print(item)
yield {
'positionId': item.get('positionId'),
'positionName': item.get('positionName'),
'city': item.get('city'),
'createTime': item.get('createTime'),
'salary': item.get('salary'),
'companyId': item.get('companyId'),
'companyFullName': item.get('companyFullName')
}
except IndexError as e:
            print('This page may have fewer results than expected:', e)


def save_to_mongo(data):
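    # Legacy pymongo Collection.update with upsert=True (third argument): insert the
    # record if no document with this positionId exists, otherwise update it in place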
if db[DATA_NAME].update({'positionId': data['positionId']}, {'$set': data}, True):
print('Saved to Mongo', data['positionId'])
else:
        print('Saved to Mongo Failed', data['positionId'])


def parse_detail(url):
# url = "http://m.lagou.com/jobs/4593934.html"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36",
"Accept": "text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8",
"Accept - Encoding": "gzip, deflate",
"Accept - Language": "zh - CN, zh;q = 0.9",
"Cache - Control": "max - age = 0",
"Connection": "eep - alive",
# "Cookie": "_ga=GA1.2.474762156.1528795210; _gid=GA1.2.574638607.1528795210; user_trace_token=20180612172010-cdf76dc1-6e21-11e8-9af0-525400f775ce; LGUID=20180612172010-cdf772c0-6e21-11e8-9af0-525400f775ce; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528795210,1528795215,1528795223; index_location_city=%E5%85%A8%E5%9B%BD; X_HTTP_TOKEN=f3ed266ddeee802fb7d402e4f6d4f4a3; JSESSIONID=ABAAABAAAFDABFG9F9C52FA9D8CAE24F139A0131C45E918; _ga=GA1.3.474762156.1528795210; _gat=1; LGSID=20180612184248-597a7795-6e2d-11e8-9479-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=http%3A%2F%2Fm.lagou.com%2Fsearch.html; PRE_LAND=http%3A%2F%2Fm.lagou.com%2Fjobs%2F4079910.html; LGRID=20180612184505-ab051d02-6e2d-11e8-9479-5254005c3644; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1528800306" }
try:
response = requests.get(url, headers=headers)
        if response.status_code == 200:
            print('Request OK')
text = response.content.decode()
# print(text)
html = etree.HTML(text)
workyear = html.xpath('//span[@class="item workyear"]/span/text()')
if workyear:
            workyear = workyear[0]
else:
            time.sleep(3)  # wait and retry (delay value assumed)
            return parse_detail(url)
positiondesc = html.xpath('//div[@class="positiondesc"]//p/text()')
#print(workyear, positiondesc)
return workyear, positiondesc
except Exception as e:
        print(e)


# Crawl all pages sequentially and save the results to MongoDB
def to_mongo(page_sum):
    # Lagou shows at most 334 result pages
for page in range(page_sum):
html = page_index(page)
items = parse_page_index(html)
# print(items)
for item in items:
print(item)
            save_to_mongo(item)


# Worker function for the process pool: crawl one page and save its records to MongoDB
def to_mongo_pool(page):
    # Lagou shows at most 334 result pages
content = page_index(page)
items = parse_page_index(content)
# print(items)
for item in items:
print(item)
        save_to_mongo(item)


# Parse the crawled records so they can be converted into a DataFrame
def parse_items(page_sum):
for page in range(page_sum):
html = page_index(page)
items = parse_page_index(html)
for item in items:
positionId = item["positionId"]
detail_url = "http://m.lagou.com/jobs/{}.html".format(positionId)
workyear, positiondesc = parse_detail(detail_url)
print(positionId,positiondesc)
yield [
item["positionId"],
item["positionName"],
item["city"],
item["createTime"],
item["salary"],
item["companyId"],
item["companyFullName"],
workyear,
positiondesc
            ]


# Save the data as a CSV file
def to_csv(page_sum):
item_lists = []
# print(parse_items())
for item in parse_items(page_sum):
item_lists.append(item)
#print(item_lists)
data = pd.DataFrame(item_lists,
columns=["positionId", "positionName", "city", "createTime", "salary", "companyId",
"companyFullName", "workyear", "positiondesc"])
data.to_csv("python_positon.csv") if __name__ == '__main__': #to_csv
#to_mongo()
# 建议保存到mongodb数据库中 start_time = time.time()
pool = Pool() # pool()参数:进程个数:默认的是电脑cpu的核的个数,如果要指定进程个数,这个进程个数要小于等于cpu的核数
# 第一个参数是一个函数体,不需要加括号,也不需指定参数。。
# 第二个参数是一个列表,列表中的每个参数都会传给那个函数体
pool.map(to_mongo_pool,[i for i in range(PAGE_SUM)])
# close它只是把进程池关闭
pool.close()
# join起到一个阻塞的作用,主进程要等待子进程运行完,才能接着往下运行
pool.join()
end_time = time.time()
print("总耗费时间%.2f秒" % (end_time - start_time))