Python Web Crawler - Scraping Bank-Related News from the China Securities Journal site (中证网)
Final version: 07_中证网(Plus-Pro).py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

for qq in range(8):
    # query = input("【中证网】请输入你想搜索的内容:")
    query = '苏州银行'
    # year to crawl, and the total number of result pages for that year (index qq picks one of each)
    year = [2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021][qq]
    pages = [2, 1, 1, 1, 11, 1, 19, 7][qq]
    os.makedirs(f'D:/桌面/爬虫-银行/中国证券网/{query}', exist_ok=True)  # create the folder if it does not exist
    m = 0
    for p in range(1, pages + 1):
        url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
        dic = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp = requests.get(url, headers=dic)
        resp.encoding = 'utf-8'
        print(f'\n>>>--------------------第{p}页---------------------<<<\n')
        page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
        alist = page.find_all("table")
        datalist = []
        for ii in alist:
            # each search hit keeps its summary in a <td> with this exact inline style
            ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
            if ss is not None:
                datalist.append(ss.get_text())
        os.makedirs(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}', exist_ok=True)  # per-year folder
        for ii in range(len(datalist)):
            with open(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}/({year}){ii + m + 1}.txt', 'w', encoding='utf-8') as fp:
                fp.write(datalist[ii] + '\n')  # text only
            print(datalist[ii])
            print(f'\n> > >{year}年,第{p}页,第{ii + 1}篇,成功! < < <')
        m = m + len(datalist)  # advance the file counter (the original added an extra +1, leaving gaps in the numbering)
    print('----------------------------')
    print(f'------\n{year}年,爬取完毕----')
    print('----------------------------')
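The final script works, but every request goes out without a timeout or status check, and the long query string is assembled by hand. Below is a minimal sketch, not part of the original, of how the per-page fetch could lean on requests' own parameter encoding; the parameter names are a subset copied from the URL above, and fetch_search_page is a hypothetical helper name.

# A sketch under stated assumptions, not the author's code: the same search
# request expressed with requests' params handling, plus a timeout and a
# status check. `fetch_search_page` is a hypothetical helper name.
import requests
from bs4 import BeautifulSoup

HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}

def fetch_search_page(query, year, page):
    # parameter names copied from the hand-built URL in the script above
    params = {
        "page": page,
        "channelid": "215308",
        "searchword": query,
        "keyword": query,
        "perpage": "10",
        "timeline": f"={year}",  # the original URL reads 'timeline=={year}'
    }
    resp = requests.get("http://search.cs.com.cn/search",
                        params=params, headers=HEADERS, timeout=10)
    resp.raise_for_status()  # fail loudly instead of silently writing empty files
    resp.encoding = "utf-8"
    return BeautifulSoup(resp.text, "html.parser")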
Optimization history: 01_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
url = f'http://search.cs.com.cn/search?channelid=215308&perpage=&templet=&token=12.1462412070719.47&searchword={query}'
dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
resp = requests.get(url, headers=dic)
resp.encoding = 'utf-8'
page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
alist = page.find("table").find_all("a")
weblist = []
for a in alist:
    if a.get('href')[:5] == "https":  # keep only absolute https links
        weblist.append(a.get('href'))
# ---------------- each article on this page ----------------
m = 0
for ii in range(len(weblist)):
    url_a = weblist[ii]
    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
    resp_a = requests.get(url_a, headers=dic_a)
    resp_a.encoding = 'gbk'
    page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
    page_b = page_a.find('section').find_all('p')
    fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{ii+1}.txt', 'w+', encoding='utf-8')
    txt_list = []
    for txt_a in page_b:
        txt_list.append(txt_a.text)
    # ---------------- write the text ----------------
    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only
    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii + 1
# ---------------- further pages ----------------
if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"
        # NOTE (historical bug): the line below requests `url`, not the `url_s`
        # just built, so every pass re-fetches page 1; later versions rework this.
        resp = requests.get(url, headers=dic)
        resp.encoding = 'utf-8'
        page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
        alist = page.find("table").find_all("a")
        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))
        # ---------------- each article on this page ----------------
        for ii in range(len(weblist)):
            url_a = weblist[ii]
            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
            resp_a = requests.get(url_a, headers=dic_a)
            resp_a.encoding = 'gbk'
            page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
            page_b = page_a.find('section').find_all('p')
            fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{ii + 1 + m}.txt', 'w+', encoding='utf-8')
            txt_list = []
            for txt_a in page_b:
                txt_list.append(txt_a.text)
            # ---------------- write the text ----------------
            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only
            print(f'>>{ii + 1 + m}成功!')
            m = m + ii + 1
            fp.close()
print('---------------\n>>>爬取完毕<<<')
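In the multi-page branch, url_s is built but never used, and the loop starts from page 1 again even though page 1 was already crawled above. A minimal sketch of what the loop was presumably meant to do, keeping the script's own variable names:

# Sketch of the intended pagination: request the URL that was actually built,
# and start from page 2 because page 1 was already handled above.
if pages > 1:
    for p in range(2, pages + 1):
        url_s = f"http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}"
        resp = requests.get(url_s, headers=dic)
        resp.encoding = 'utf-8'
        # ... parse the result table and save articles exactly as for page 1 ...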
Optimization history: 02_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
url = f'http://search.cs.com.cn/search?page=1&channelid=215308&searchword={query}'
dic = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                  "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
resp = requests.get(url, headers=dic)
resp.encoding = 'utf-8'
page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
alist = page.find("table").find_all("a")
weblist = []
for a in alist:
    if a.get('href')[:5] == "https":  # keep only absolute https links
        weblist.append(a.get('href'))
# ---------------- each article on this page ----------------
m = 0
for ii in range(len(weblist)):
    url_a = weblist[ii]
    dic_a = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                      "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
    resp_a = requests.get(url_a, headers=dic_a)
    resp_a.encoding = 'gbk'
    page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
    page_b = page_a.find('section').find_all('p')
    fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/0/(2021){ii+1}.txt', 'w+', encoding='utf-8')
    txt_list = []
    for txt_a in page_b:
        txt_list.append(txt_a.text)
    # ---------------- write the text ----------------
    for i in range(len(txt_list)):
        fp.write(txt_list[i] + '\n')  # text only
    fp.close()
    print(f'>>{ii+1}成功!')
    m = ii + 1
# ---------------- further pages ----------------
if pages > 1:
    for p in range(pages):
        url_s = f"http://search.cs.com.cn/search?page={p+1}&channelid=215308&searchword={query}"
        # NOTE (historical bug): still requests `url` (page 1) instead of the
        # `url_s` just built, so later pages are never actually fetched.
        resp = requests.get(url, headers=dic)
        resp.encoding = 'utf-8'
        page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
        alist = page.find("table").find_all("a")
        weblist = []
        for a in alist:
            if a.get('href')[:5] == "https":
                weblist.append(a.get('href'))
        # ---------------- each article on this page ----------------
        for ii in range(len(weblist)):
            url_a = weblist[ii]
            dic_a = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 "
                              "Safari/537.36 SLBrowser/7.0.0.6241 SLBChan/30"}
            resp_a = requests.get(url_a, headers=dic_a)
            resp_a.encoding = 'gbk'
            page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
            page_b = page_a.find('section').find_all('p')
            fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/0/(2021){ii + 1 + m}.txt', 'w+', encoding='utf-8')
            txt_list = []
            for txt_a in page_b:
                txt_list.append(txt_a.text)
            # ---------------- write the text ----------------
            for i in range(len(txt_list)):
                fp.write(txt_list[i] + '\n')  # text only
            print(f'>>{ii + 1 + m}成功!')
            m = m + ii + 1
            fp.close()
print('---------------\n>>>爬取完毕<<<')
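Versions 01 and 02 carry two verbatim copies of the download-and-save block, one for page 1 and one for the later pages, which is exactly how the url/url_s slip survived. A sketch of how the copy could be factored into one helper; save_article is a hypothetical name, not a function from the original:

# Sketch: one helper instead of two copies of the same block.
def save_article(article_url, out_path, headers):
    resp = requests.get(article_url, headers=headers, timeout=10)
    resp.encoding = 'gbk'  # article pages are assumed GBK, as in the script
    soup = BeautifulSoup(resp.text, "html.parser")
    paragraphs = soup.find('section').find_all('p')
    with open(out_path, 'w', encoding='utf-8') as fp:
        for ptag in paragraphs:
            fp.write(ptag.text + '\n')  # text only, as in the original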
Optimization history: 03_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&perpage=10&outlinepage=5&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2021'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
    alist = page.find("table").find_all('a')
    weblist = []
    for a in alist:
        if a.get('href')[:5] == "https":  # keep only absolute https links
            weblist.append(a.get('href'))
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
        page_b = page_a.find('section').find_all('p')
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/2021/(2021){ii+m+1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        # ---------------- write the text ----------------
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1
print('---------------\n>>>爬取完毕<<<')
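The link filter [:5] == "https" silently drops plain http:// article links and raises a TypeError on any <a> tag without an href attribute. A more tolerant sketch:

# Sketch: accept both http and https links, and skip anchors without an href.
weblist = []
for a in alist:
    href = a.get('href')
    if href and href.startswith(('http://', 'https://')):
        weblist.append(href)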
Optimization history: 04_中证网(网址筛选问题).py (the "URL filtering problem" version)
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

query = input("【中证网】请输入你想搜索的内容:")
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    # NOTE (historical bug): `page={pages}` should be `page={p}`, so every pass fetched the same page.
    url = f'http://search.cs.com.cn/search?page={pages}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline==2020'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
    alist = page.find("table").find_all('a')
    print('alist:', alist)
    weblist = []
    for a in alist:
        # NOTE (historical bug, the "URL filtering problem" this version is named for):
        # the test should be [:4] == "http"; [4:] keeps everything AFTER the first
        # four characters, so no link ever matches and weblist stays empty.
        if a.get('href')[4:] == "http":
            weblist.append(a.get('href'))
    print('weblist==', weblist)
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
        page_b = page_a.find('section').find_all('p')
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/2020/(2020){ii+m+1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii+1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        # ---------------- write the text ----------------
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii+1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1
print('---------------\n>>>爬取完毕<<<')
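The slice direction is easy to get backwards; two lines on a made-up URL show why the filter above never matched:

# Illustration only, with a made-up URL.
url = 'https://example.com/article.html'
print(url[:4])  # 'http'  -> what the filter should compare against
print(url[4:])  # 's://example.com/article.html' -> never equals 'http'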
Optimization history: 05_中证网.py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

query = input("【中证网】请输入你想搜索的内容:")
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
    alist = page.find("table").find('tr').find_all('a')
    weblist = []
    for a in alist:
        if a.get('href')[:4] == "http":  # the slice fixed after version 04
            weblist.append(a.get('href'))
    print('weblist==', weblist)
    # ---------------- each article on this page ----------------
    for ii in range(len(weblist)):
        url_a = weblist[ii]
        dic_a = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
        resp_a = requests.get(url_a, headers=dic_a)
        resp_a.encoding = 'gbk'
        page_a = BeautifulSoup(resp_a.text, "html.parser")  # pick the HTML parser
        page_b = page_a.find_all('p')
        # NOTE: the {year} folder must already exist here; version 06 adds the os.mkdir step.
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/中国银行/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
        txt_list = []
        for txt_a in page_b:
            txt_list.append(txt_a.text)
        print(f'\n-++++++++++++++++++第{ii + 1}篇文章++++++++++++++++-\n', txt_list, len(txt_list))
        # ---------------- write the text ----------------
        for i in range(len(txt_list)):
            fp.write(txt_list[i] + '\n')  # text only
        print(f'\n> > >{ii + 1}成功! < < <')
        fp.close()
    m = m + len(weblist) + 1
print('---------------\n>>>爬取完毕<<<')
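Hard-coding resp_a.encoding = 'gbk' garbles any article page that is actually served as UTF-8. A sketch of a safer default, trusting a declared charset and otherwise falling back to requests' own detection:

# Sketch: fall back to charset detection instead of assuming GBK everywhere.
resp_a = requests.get(url_a, headers=dic_a, timeout=10)
if resp_a.encoding is None or resp_a.encoding.lower() == 'iso-8859-1':
    # requests defaults to ISO-8859-1 when the header is silent; guess from the body
    resp_a.encoding = resp_a.apparent_encoding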
Optimization history: 06_中证网(Plus).py
# coding=utf-8
import requests
from bs4 import BeautifulSoup
import io
import sys
import os

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')  # change stdout's default encoding

# query = input("【中证网】请输入你想搜索的内容:")
query = '交通银行'
year = int(input('要爬取的年份:'))
pages = int(input("要爬取的页数(不小于1):"))
if pages < 1:
    exit()
m = 0
for p in range(1, pages + 1):
    url = f'http://search.cs.com.cn/search?page={p}&channelid=215308&searchword={query}&keyword={query}&token=12.1462412070719.47&perpage=10&outlinepage=5&andsen=&total=&orsen=&exclude=&searchscope=&timescope=&timescopecolumn=&orderby=&timeline=={year}'
    dic = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"}
    resp = requests.get(url, headers=dic)
    resp.encoding = 'utf-8'
    print(f'\n>>>--------------------第{p}页---------------------<<<\n')
    page = BeautifulSoup(resp.text, "html.parser")  # pick the HTML parser
    alist = page.find_all("table")
    datalist = []
    for ii in alist:
        # grab the summary <td> directly instead of following each article link
        ss = ii.find('td', style='font-size: 12px;line-height: 24px;color: #333333;margin-top: 4px;')
        if ss is not None:
            datalist.append(ss.get_text())
    if not os.path.isdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}'):  # create the folder if it does not exist
        os.mkdir(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}')
    for ii in range(len(datalist)):
        fp = open(f'D:/桌面/爬虫-银行/中国证券网/{query}/{year}/({year}){ii + m + 1}.txt', 'w+', encoding='utf-8')
        fp.write(datalist[ii] + '\n')  # text only
        print(datalist[ii])
        print(f'\n> > >第{p}页,第{ii + 1}篇,成功! < < <')
        fp.close()
    m = m + len(datalist) + 1
print('----------------------------')
print(f'------\n{year}年,爬取完毕----')
print('----------------------------')
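None of the versions pause between requests or reuse a connection, which starts to matter when a crawl walks years of results. A sketch of both habits; the one-second delay is an arbitrary choice, not from the original, and polite_get is a hypothetical helper name:

# Sketch: one shared session plus a fixed pause between requests.
import time
import requests

session = requests.Session()  # reuses the TCP connection across requests
session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"

def polite_get(url, delay=1.0, **kwargs):
    # GET with a pause so the search server is not hammered
    resp = session.get(url, timeout=10, **kwargs)
    resp.raise_for_status()
    time.sleep(delay)
    return resp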