Examples of implementing "auto-login" with Requests and BeautifulSoup

Auto-login to 抽屉新热榜 (dig.chouti.com)

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests

# ############## Approach 1: pass cookies by hand ##############
"""
# 1. Request any page first to obtain the initial cookies
i1 = requests.get(url="http://dig.chouti.com/help/service")
i1_cookies = i1.cookies.get_dict()

# 2. Log in while carrying the cookies from step 1; the backend authorizes the gpsd cookie
i2 = requests.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxooxxoo",
        'oneMonth': ""
    },
    cookies=i1_cookies
)

# 3. Up-vote a link (only the already-authorized gpsd cookie is needed)
gpsd = i1_cookies['gpsd']
i3 = requests.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523",
    cookies={'gpsd': gpsd}
)
print(i3.text)
"""

# ############## Approach 2: let requests.Session manage cookies ##############
"""
import requests

session = requests.Session()
i1 = session.get(url="http://dig.chouti.com/help/service")
i2 = session.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxooxxoo",
        'oneMonth': ""
    }
)
i3 = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523"
)
print(i3.text)
"""
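The only difference between the two approaches above is who carries the cookies: approach 1 passes cookies= explicitly on every call, while approach 2 lets requests.Session record whatever Set-Cookie headers come back and replay them on later requests automatically. A minimal, self-contained sketch of that behaviour (httpbin.org is used purely as a stand-in test server; it is not part of the original example):

import requests

session = requests.Session()

# The server sets a cookie on the first response ...
session.get("https://httpbin.org/cookies/set/gpsd/demo-value")

# ... and the Session sends it back automatically on the next request,
# which is why approach 2 never has to touch cookies by hand.
print(session.cookies.get_dict())                       # {'gpsd': 'demo-value'}
print(session.get("https://httpbin.org/cookies").json())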


Auto-login to GitHub

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup

# ############## Approach 1: pass cookies by hand ##############
#
# # 1. Request the login page and extract the hidden authenticity_token
# i1 = requests.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# c1 = i1.cookies.get_dict()
# i1.close()
#
# # 2. Post the authenticity_token together with the username and password
# form_data = {
#     "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "wupeiqi@live.com",
#     'password': 'xxoo'
# }
#
# i2 = requests.post('https://github.com/session', data=form_data, cookies=c1)
# c2 = i2.cookies.get_dict()
# c1.update(c2)
#
# # 3. Request a page that requires login, carrying the merged cookies
# i3 = requests.get('https://github.com/settings/repositories', cookies=c1)
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "project: %s (%s); path: %s" % (project_tag.string, size_tag.string, project_tag.get('href'), )
#         print(temp)

# ############## Approach 2: let requests.Session manage cookies ##############
# session = requests.Session()
#
# # 1. Request the login page and extract the hidden authenticity_token
# i1 = session.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# i1.close()
#
# # 2. Post the authenticity_token together with the username and password
# form_data = {
#     "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "wupeiqi@live.com",
#     'password': 'xxoo'
# }
#
# i2 = session.post('https://github.com/session', data=form_data)
#
# # 3. Request a page that requires login; the session carries the cookies itself
# i3 = session.get('https://github.com/settings/repositories')
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "project: %s (%s); path: %s" % (project_tag.string, size_tag.string, project_tag.get('href'), )
#         print(temp)
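The step both GitHub variants depend on is pulling the hidden authenticity_token out of the login form before posting the credentials. A small offline sketch of that extraction, run against a made-up HTML fragment instead of the live login page (the fragment and its token value are invented for illustration; the built-in html.parser is used so nothing beyond BeautifulSoup is required):

from bs4 import BeautifulSoup

# A made-up fragment standing in for the real login page.
html = """
<form action="/session" method="post">
    <input type="hidden" name="authenticity_token" value="abc123TOKEN" />
    <input type="text" name="login" />
</form>
"""

soup = BeautifulSoup(html, features='html.parser')
tag = soup.find(name='input', attrs={'name': 'authenticity_token'})
print(tag.get('value'))  # abc123TOKEN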


Auto-login to 知乎 (Zhihu)

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time

import requests
from bs4 import BeautifulSoup

session = requests.Session()

# 1. Request the sign-in page and extract the hidden _xsrf token
i1 = session.get(
    url='https://www.zhihu.com/#signin',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)
soup1 = BeautifulSoup(i1.text, 'lxml')
xsrf_tag = soup1.find(name='input', attrs={'name': '_xsrf'})
xsrf = xsrf_tag.get('value')

# 2. Download the captcha image and ask the user to type it in
current_time = time.time()
i2 = session.get(
    url='https://www.zhihu.com/captcha.gif',
    params={'r': current_time, 'type': 'login'},
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    })
with open('zhihu.gif', 'wb') as f:
    f.write(i2.content)

captcha = input('Open zhihu.gif and type the captcha shown in it: ')

# 3. Post the _xsrf token, credentials and captcha to log in
form_data = {
    "_xsrf": xsrf,
    'password': 'xxooxxoo',
    "captcha": captcha,
    'email': '424662508@qq.com'
}
i3 = session.post(
    url='https://www.zhihu.com/login/email',
    data=form_data,
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

# 4. Request the profile settings page and print the nickname to confirm the login worked
i4 = session.get(
    url='https://www.zhihu.com/settings/profile',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)
soup4 = BeautifulSoup(i4.text, 'lxml')
tag = soup4.find(id='rename-section')
nick_name = tag.find('span', class_='name').string
print(nick_name)
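Once a login like the one above has succeeded, the session's cookies can be saved so the captcha does not have to be solved again on every run. This is not part of the original example; a hedged sketch using requests' cookie-jar helpers and a JSON file (the file name is arbitrary):

import json
import requests

session = requests.Session()
# ... log in here as in the example above ...

# Save the cookies after a successful login
with open('zhihu_cookies.json', 'w') as f:
    json.dump(requests.utils.dict_from_cookiejar(session.cookies), f)

# Later, restore them into a fresh session instead of logging in again
new_session = requests.Session()
with open('zhihu_cookies.json') as f:
    new_session.cookies = requests.utils.cookiejar_from_dict(json.load(f))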


Auto-login to 博客园 (cnblogs)

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import json
import base64

import rsa
import requests


def js_encrypt(text):
    # Reproduce the RSA encryption done by the login page's JavaScript.
    b64der = 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCp0wHYbg/NOPO3nzMD3dndwS0MccuMeXCHgVlGOoYyFwLdS24Im2e7YyhB0wrUsyYf0/nhzCzBK8ZC9eCWqd0aHbdgOQT6CuFQBMjbyGYvlVYU2ZP7kG9Ft6YV6oc9ambuO7nPZh+bvXH0zDKfi02prknrScAKC0XhadTHT3Al0QIDAQAB'
    der = base64.standard_b64decode(b64der)
    pk = rsa.PublicKey.load_pkcs1_openssl_der(der)
    v1 = rsa.encrypt(bytes(text, 'utf8'), pk)
    value = base64.encodebytes(v1).replace(b'\n', b'')
    value = value.decode('utf8')
    return value


session = requests.Session()

# 1. Request the sign-in page and extract the VerificationToken embedded in its JavaScript
i1 = session.get('https://passport.cnblogs.com/user/signin')
rep = re.compile("'VerificationToken': '(.*)'")
v = re.search(rep, i1.text)
verification_token = v.group(1)

# 2. Post the RSA-encrypted username and password as JSON, with the token in a header
form_data = {
    'input1': js_encrypt('wptawy'),
    'input2': js_encrypt('asdfasdf'),
    'remember': False
}
i2 = session.post(url='https://passport.cnblogs.com/user/signin',
                  data=json.dumps(form_data),
                  headers={
                      'Content-Type': 'application/json; charset=UTF-8',
                      'X-Requested-With': 'XMLHttpRequest',
                      'VerificationToken': verification_token}
                  )

# 3. Request a page that requires login to confirm the session is authenticated
i3 = session.get(url='https://i.cnblogs.com/EditDiary.aspx')
print(i3.text)
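js_encrypt above leans on the third-party rsa package: it loads the site's public key from its base64-encoded DER form and encrypts the credentials with it, mirroring what the login page's JavaScript does in the browser. A self-contained round-trip sketch of that library usage, with a locally generated throwaway key pair standing in for the site's real key:

import base64
import rsa

# Generate a throwaway key pair (the real code only ever sees the public key).
pub_key, priv_key = rsa.newkeys(512)

ciphertext = rsa.encrypt(b'asdfasdf', pub_key)
encoded = base64.encodebytes(ciphertext).replace(b'\n', b'').decode('utf8')
print(encoded)

# Only the holder of the private key (the server) can recover the plaintext.
print(rsa.decrypt(ciphertext, priv_key))  # b'asdfasdf'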

