Scraping Zhihu Q&A with Scrapy
Login
Reference: https://github.com/zkqiang/Zhihu-Login
# -*- coding: utf-8 -*-
import scrapy
import time
import re
import base64
import hmac
import hashlib
import json
import matplotlib.pyplot as plt
from PIL import Image


class ZhihuSpider(scrapy.Spider):
    name = 'zhihu'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    login_url = 'https://www.zhihu.com/signup'
    login_api = 'https://www.zhihu.com/api/v3/oauth/sign_in'
    login_data = {
        'client_id': 'c3cef7c66a1843f8b3a9e6a1e3160e20',
        'grant_type': 'password',
        'source': 'com.zhihu.web',
        'username': "+86xxxxxx",
        'password': "xxxxxxxx",
        # pass 'cn' to get the upside-down-Chinese-characters captcha
        'lang': 'en',
        'ref_source': 'homepage'
    }
    headers = {
        'Connection': 'keep-alive',
        'Host': 'www.zhihu.com',
        'Referer': 'https://www.zhihu.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/69.0.3497.100 Safari/537.36'
    }

    def start_requests(self):
        # ask the captcha API whether a captcha is required for this login
        if self.login_data["lang"] == 'cn':
            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
        else:
            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'
        yield scrapy.Request(url=api, headers=self.headers, callback=self._is_need_captcha)

    def _is_need_captcha(self, response):
        # the API answers with {"show_captcha": true/false}
        show_captcha = re.search(r'true', response.text)
        if show_captcha:
            # a PUT to the same endpoint returns the captcha image
            yield scrapy.Request(url=response.url,
                                 headers=self.headers,
                                 method="PUT",
                                 callback=self._get_captcha)
        else:
            # no captcha needed: sign the request and log in directly
            timestamp = str(int(time.time() * 1000))
            self.login_data.update({
                'captcha': "",
                'timestamp': timestamp,
                'signature': self._get_signature(timestamp)
            })
            yield scrapy.FormRequest(
                url=self.login_api,
                formdata=self.login_data,
                headers=self.headers,
                callback=self.check_login
            )

    def _get_captcha(self, response):
        json_data = json.loads(response.text)
        img_base64 = json_data['img_base64'].replace(r'\n', '')
        with open('./captcha.jpg', 'wb') as f:
            f.write(base64.b64decode(img_base64))
        img = Image.open('./captcha.jpg')
        if self.login_data["lang"] == 'cn':
            plt.imshow(img)
            print('Click all upside-down characters, then press Enter to submit')
            points = plt.ginput(7)
            capt = json.dumps({'img_size': [200, 44],
                               'input_points': [[i[0] / 2, i[1] / 2] for i in points]})
        else:
            img.show()
            capt = input('Enter the captcha shown in the image: ')
        # the captcha text must be POSTed to the captcha API before signing in
        yield scrapy.FormRequest(url=response.url,
                                 formdata={'input_text': capt},
                                 headers=self.headers,
                                 callback=self.captcha_login,
                                 meta={"captcha": capt})

    def captcha_login(self, response):
        timestamp = str(int(time.time() * 1000))
        self.login_data.update({
            'captcha': response.meta['captcha'],
            'timestamp': timestamp,
            'signature': self._get_signature(timestamp)
        })
        yield scrapy.FormRequest(
            url=self.login_api,
            formdata=self.login_data,
            headers=self.headers,
            callback=self.check_login
        )

    def check_login(self, response):
        # if sign_in succeeded the session cookie is now set; continue crawling
        yield scrapy.Request(
            url=self.login_url,
            headers=self.headers,
            callback=self.parse
        )

    def _get_signature(self, timestamp):
        """
        Compute the request signature with HMAC-SHA1.
        It is really just a few fixed strings plus the timestamp.
        :param timestamp: millisecond timestamp
        :return: signature
        """
        ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
        grant_type = self.login_data['grant_type']
        client_id = self.login_data['client_id']
        source = self.login_data['source']
        ha.update(bytes((grant_type + client_id + source + timestamp), 'utf-8'))
        return ha.hexdigest()

    def parse(self, response):
        print(response.text)
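Before running the spider, note that a Scrapy project generated from the default template will refuse to crawl Zhihu because robots.txt checking is on. A minimal sketch of the relevant settings.py entries (COOKIES_ENABLED is already Scrapy's default and is listed only because the login flow depends on it):

# settings.py
ROBOTSTXT_OBEY = False   # Zhihu's robots.txt would otherwise block the requests
COOKIES_ENABLED = True   # keep the session cookie obtained by the login flow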
Database design
DROP TABLE IF EXISTS `zhihu_question`;
CREATE TABLE `zhihu_question` (
  `zhihu_id` bigint(20) NOT NULL,
  `topics` varchar(255) DEFAULT NULL,
  `url` varchar(300) NOT NULL,
  `title` varchar(255) NOT NULL,
  `content` longtext NOT NULL,
  `create_time` datetime DEFAULT NULL,
  `update_time` datetime DEFAULT NULL,
  `answer_num` int(11) NOT NULL DEFAULT '0',
  `comments_num` int(11) NOT NULL DEFAULT '0',
  `watch_user_num` int(11) NOT NULL DEFAULT '0',
  `click_num` int(11) NOT NULL DEFAULT '0',
  `crawl_time` datetime NOT NULL,
  `crawl_update_time` datetime DEFAULT NULL,
  PRIMARY KEY (`zhihu_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
DROP TABLE IF EXISTS `zhihu_answer`;
CREATE TABLE `zhihu_answer` (
  `zhihu_id` bigint(20) NOT NULL,
  `url` varchar(255) NOT NULL,
  `question_id` bigint(20) NOT NULL,
  `author_id` varchar(100) DEFAULT NULL,
  `content` longtext NOT NULL,
  `praise_num` int(11) NOT NULL DEFAULT '0',
  `comments_num` int(11) NOT NULL DEFAULT '0',
  `create_time` datetime NOT NULL,
  `update_time` datetime NOT NULL,
  `crawl_time` datetime NOT NULL,
  `crawl_update_time` datetime DEFAULT NULL,
  PRIMARY KEY (`zhihu_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
Page parsing
# These methods belong to the same ZhihuSpider and additionally rely on the
# module-level imports: from urllib.parse import urljoin, import datetime,
# from scrapy.loader import ItemLoader, plus the two items defined below.
def parse(self, response):
    """
    Extract every URL from the HTML page and follow it.
    URLs of the form /question/xxx are downloaded and handed
    straight to the question parser.
    """
    all_urls = response.css("a::attr(href)").extract()
    all_urls = [urljoin(response.url, url) for url in all_urls]
    all_urls = filter(lambda x: x.startswith("https"), all_urls)
    for url in all_urls:
        match_obj = re.match(r"(.*zhihu.com/question/(\d+))(/|$).*", url)
        if match_obj:
            # a question page: download it and pass it to the extraction callback
            request_url = match_obj.group(1)
            yield scrapy.Request(request_url, headers=self.headers, callback=self.parse_question)
        else:
            # not a question page: keep following links
            yield scrapy.Request(url, headers=self.headers, callback=self.parse)

def parse_question(self, response):
    match_obj = re.match(r"(.*zhihu.com/question/(\d+))(/|$).*", response.url)
    if match_obj:
        question_id = int(match_obj.group(2))
        item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
        item_loader.add_css("title", "h1.QuestionHeader-title::text")
        item_loader.add_css("content", ".QuestionHeader-detail")
        item_loader.add_value("url", response.url)
        item_loader.add_value("zhihu_id", question_id)
        item_loader.add_css("answer_num", ".List-headerText span::text")
        item_loader.add_css("comments_num", ".QuestionHeader-actions button::text")
        item_loader.add_css("watch_user_num", ".NumberBoard-value::text")
        item_loader.add_css("topics", ".QuestionHeader-topics .Popover div::text")
        question_item = item_loader.load_item()
        # request the first page of answers through the answers API
        # (start_answer_url is sketched after this block)
        yield scrapy.Request(self.start_answer_url.format(question_id, 20, 0),
                             headers=self.headers, callback=self.parse_answer)
        yield question_item

def parse_answer(self, response):
    # process the answers of a question
    ans_json = json.loads(response.text)
    is_end = ans_json["paging"]["is_end"]
    next_url = ans_json["paging"]["next"]
    # extract the individual answer fields
    for answer in ans_json["data"]:
        answer_item = ZhihuAnswerItem()
        answer_item["zhihu_id"] = answer["id"]
        answer_item["url"] = answer["url"]
        answer_item["question_id"] = answer["question"]["id"]
        answer_item["author_id"] = answer["author"]["id"] if "id" in answer["author"] else None
        answer_item["content"] = answer["content"] if "content" in answer else None
        answer_item["praise_num"] = answer["voteup_count"]
        answer_item["comments_num"] = answer["comment_count"]
        answer_item["create_time"] = answer["created_time"]
        answer_item["update_time"] = answer["updated_time"]
        answer_item["crawl_time"] = datetime.datetime.now()
        yield answer_item
    # follow the paging cursor until the last page
    if not is_end:
        yield scrapy.Request(next_url, headers=self.headers, callback=self.parse_answer)
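parse_question builds its answer request from self.start_answer_url, which the snippet above does not show. It is a class attribute holding Zhihu's v4 answers API URL template, formatted with (question_id, limit, offset). A sketch of what it could look like; the include field list here is my trimmed-down guess covering only the fields parse_answer reads, and the version used by the live site carries more fields:

# assumed class attribute, not confirmed against the live API
start_answer_url = ("https://www.zhihu.com/api/v4/questions/{0}/answers"
                    "?include=data[*].is_normal,content,voteup_count,comment_count,"
                    "created_time,updated_time&limit={1}&offset={2}")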
items
class ZhihuQuestionItem(scrapy.Item):
    # Zhihu question item
    zhihu_id = scrapy.Field()
    topics = scrapy.Field()
    url = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()
    answer_num = scrapy.Field()
    comments_num = scrapy.Field()
    watch_user_num = scrapy.Field()
    click_num = scrapy.Field()
    crawl_time = scrapy.Field()

    def get_insert_sql(self):
        # SQL statement for inserting into the zhihu_question table
        insert_sql = """
            insert into zhihu_question(zhihu_id, topics, url, title, content, answer_num, comments_num,
                watch_user_num, click_num, crawl_time)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE content=VALUES(content), answer_num=VALUES(answer_num),
                comments_num=VALUES(comments_num), watch_user_num=VALUES(watch_user_num),
                click_num=VALUES(click_num)
        """
        # ItemLoader stores every field as a list, hence the [0] / join calls
        zhihu_id = self["zhihu_id"][0]
        topics = ",".join(self["topics"])
        url = self["url"][0]
        title = "".join(self["title"])
        content = "".join(self["content"])
        # extract_num and SQL_DATETIME_FORMAT come from the project's utils
        # module (a sketch follows after the items)
        answer_num = extract_num("".join(self["answer_num"]))
        comments_num = extract_num("".join(self["comments_num"]))
        # the NumberBoard holds follower and view counts; views may be missing
        if len(self["watch_user_num"]) == 2:
            watch_user_num = int(self["watch_user_num"][0])
            click_num = int(self["watch_user_num"][1])
        else:
            watch_user_num = int(self["watch_user_num"][0])
            click_num = 0
        crawl_time = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)
        params = (zhihu_id, topics, url, title, content, answer_num, comments_num,
                  watch_user_num, click_num, crawl_time)
        return insert_sql, params
class ZhihuAnswerItem(scrapy.Item):
    # Zhihu answer item
    zhihu_id = scrapy.Field()
    url = scrapy.Field()
    question_id = scrapy.Field()
    author_id = scrapy.Field()
    content = scrapy.Field()
    praise_num = scrapy.Field()
    comments_num = scrapy.Field()
    create_time = scrapy.Field()
    update_time = scrapy.Field()
    crawl_time = scrapy.Field()

    def get_insert_sql(self):
        # SQL statement for inserting into the zhihu_answer table
        insert_sql = """
            insert into zhihu_answer(zhihu_id, url, question_id, author_id, content, praise_num, comments_num,
                create_time, update_time, crawl_time)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE content=VALUES(content), comments_num=VALUES(comments_num),
                praise_num=VALUES(praise_num), update_time=VALUES(update_time)
        """
        # the API returns Unix timestamps; convert them for MySQL
        create_time = datetime.datetime.fromtimestamp(self["create_time"]).strftime(SQL_DATETIME_FORMAT)
        update_time = datetime.datetime.fromtimestamp(self["update_time"]).strftime(SQL_DATETIME_FORMAT)
        params = (
            self["zhihu_id"], self["url"], self["question_id"],
            self["author_id"], self["content"], self["praise_num"],
            self["comments_num"], create_time, update_time,
            self["crawl_time"].strftime(SQL_DATETIME_FORMAT),
        )
        return insert_sql, params
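Both get_insert_sql methods lean on extract_num and SQL_DATETIME_FORMAT from the project's utils module, which the article does not show. A minimal sketch of what they could look like; the names come from the code above, the bodies are my assumption:

import re

# datetime format string accepted by MySQL datetime columns
SQL_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

def extract_num(text):
    # pull the first integer out of strings such as "1,234 个回答"
    match_re = re.match(r".*?(\d+).*", text.replace(",", ""))
    return int(match_re.group(1)) if match_re else 0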
pipelines
def do_insert(self, cursor, item):
    # Run the actual insert: each item type builds its own SQL
    # statement, which is then executed against MySQL.
    insert_sql, params = item.get_insert_sql()
    cursor.execute(insert_sql, params)
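In context, do_insert is the final method of an asynchronous MySQL pipeline driven by Twisted's adbapi, so inserts run on a connection-pool thread instead of blocking the crawl. A sketch of the rest of that class, assuming pymysql and connection settings named MYSQL_HOST, MYSQL_DBNAME, MYSQL_USER and MYSQL_PASSWORD in settings.py:

import pymysql
from twisted.enterprise import adbapi

class MysqlTwistedPipeline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        # build a Twisted connection pool on top of pymysql
        dbpool = adbapi.ConnectionPool(
            "pymysql",
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset="utf8",
            cursorclass=pymysql.cursors.DictCursor,
        )
        return cls(dbpool)

    def process_item(self, item, spider):
        # hand the insert to a pool thread; runInteraction commits on success
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        return item

    def handle_error(self, failure, item, spider):
        # log failed inserts instead of silently dropping them
        spider.logger.error(failure)

    # ... plus the do_insert method shown above

The pipeline is activated through ITEM_PIPELINES in settings.py like any other Scrapy pipeline.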