Python crawler: scraping the product IDs matching each keyword on a certain site, and storing them in DB2
Our in-house developers couldn't spare the effort, so I wrote one myself, dedicated to scraping the product IDs that match each search keyword.
Along the way I picked up how to drive a DB2 database from Python, send email from Python, write log files, and work around the site's access restrictions.
#!/usr/bin/python
# -*- encoding:utf-8 -*-
import requests
from lxml import etree
import ibm_db
import logging
import sys
import time
import smtplib

# Configure logging to a file
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='keywords_weekly.log',
                    filemode='a')

# Force UTF-8 as the default string encoding (Python 2 only; both calls are gone in Python 3)
reload(sys)
sys.setdefaultencoding('utf-8')

# Work around the server's access restrictions by retrying with a delay
def get_url_data(url, headers, max_tries=10):
    remaining_tries = max_tries
    while remaining_tries > 0:
        try:
            return requests.get(url, headers=headers)
        except requests.exceptions.RequestException:
            time.sleep(60)
            remaining_tries = remaining_tries - 1
    raise Exception("Couldn't get the url_data.")
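A variant I'd consider (my own sketch, not part of the original script): back off exponentially instead of sleeping a flat 60 seconds every time, which recovers faster from brief throttling windows while still yielding under a sustained block.

def get_url_data_backoff(url, headers, max_tries=10):
    # Hypothetical variant: wait 1 s, 2 s, 4 s ... capped at 60 s between retries
    for attempt in range(max_tries):
        try:
            return requests.get(url, headers=headers)
        except requests.exceptions.RequestException:
            time.sleep(min(60, 2 ** attempt))
    raise Exception("Couldn't get the url_data.")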
# Write one row into DB2
def write_db2(resultdict):
    rank = resultdict['rank']
    # Because of Chinese-encoding trouble, the keyword column is filled in
    # afterwards with an UPDATE (via the MERGE at the end of the script)
    # keywords = resultdict['keywords']
    uv = resultdict['uv']
    frequency = resultdict['frequency']
    goods_id = resultdict['goods_id']
    sql_in = "insert into T_KEYWORDS_weekly(K_RANK,UV,FREQUENCY,GOODS_ID,week_YEAR)" \
             " values (%r,%r,%r,%r,year(current date)||'-'||WEEK_ISO(current date))" % (rank, uv, frequency, goods_id)
    ibm_db.exec_immediate(conn, sql_in)
    ibm_db.commit(conn)
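Because the %r formatting splices values straight into the SQL string, quoting and encoding problems (and injection, if the scraped data is ever hostile) are a real risk. A minimal parameterized sketch, assuming the same table layout and the standard ibm_db prepare/execute calls; the helper name write_db2_prepared is mine:

def write_db2_prepared(resultdict):
    # Parameter markers let the driver handle quoting and encoding
    sql_in = ("insert into T_KEYWORDS_weekly(K_RANK,UV,FREQUENCY,GOODS_ID,week_YEAR)"
              " values (?,?,?,?,year(current date)||'-'||WEEK_ISO(current date))")
    stmt = ibm_db.prepare(conn, sql_in)
    ibm_db.execute(stmt, (resultdict['rank'], resultdict['uv'],
                          resultdict['frequency'], resultdict['goods_id']))
    ibm_db.commit(conn)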
# Pagination: collect the responses for every result page of a keyword
def get_html(keywords):
    # keywords = "沙发"
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36'
    headers = {'User-Agent': user_agent}
    # The site's domain is masked here; feel free to guess
    url = 'http://www.XXX.com/category-9999/list-p1/?fl=q&keywords=%s' % keywords
    html = get_url_data(url, headers)
    html_list = [html]
    selector = etree.HTML(html.text)
    # page_f = "http://www.meilele.com"
    # Read the total page count, then loop to build each page's URL
    page_e = selector.xpath('/html/body/div[@class="page-panel"]/div/div/span[@class="p-info"]/b/text()')
    if page_e:
        for i in range(2, int(page_e[0]) + 1):
            url_temp = 'http://www.meilele.com/category-9999/list-p%d/?fl=q&keywords=%s' % (i, keywords)
            html_temp = get_url_data(url_temp, headers)  # also goes through the retry wrapper
            html_list.append(html_temp)
    return html_list
# Parse the pages and extract each product ID
def get_id(dictionary):
    keywords = dictionary[1]
    html_list = get_html(keywords)
    logging.info("get the html_list %s successfully" % keywords)
    for each in html_list:
        html = each
        selector = etree.HTML(html.text)
        result = {}
        try:
            content_field = selector.xpath('//*[@id="JS_list_panel"]/div[@class="w list-wrap"]/ul[@class="list-goods clearfix"]')[0]
        except IndexError:
            # No goods list on this page: record the keyword's row with an empty ID
            result['rank'] = str(dictionary[0])
            # result['keywords'] = str(keywords)
            result['uv'] = str(dictionary[2])
            result['frequency'] = str(dictionary[3])
            result['goods_id'] = str('')
            write_db2(result)
        else:
            for i in range(1, len(content_field) + 1):
                goods_id = content_field.xpath('li[%d]/@data-goods-id' % i)[0]
                # return goods_id
                result['rank'] = str(dictionary[0])
                # result['keywords'] = str(keywords)
                result['uv'] = str(dictionary[2])
                result['frequency'] = str(dictionary[3])
                result['goods_id'] = str(goods_id)
                write_db2(result)


if __name__ == "__main__":
    # The password is masked too
    # Test the connection; ibm_db.connect raises on failure
    try:
        conn = ibm_db.connect("DATABASE=aedw;HOSTNAME=miranda;PORT=50000;PROTOCOL=TCPIP;UID=miranda;PWD=miranda;", "", "")
        logging.info("connect to DB2 successfully")
    except Exception:
        logging.info("couldn't connect to DB2")
        sys.exit(1)  # nothing below can work without a connection
    # # Create the staging table
    # sql_create = 'create table T_keywords_weekly_TEMP like V_keywords_weekly'
    # try:
    #     stmt_create = ibm_db.exec_immediate(conn, sql_create)
    #     logging.info("create table T_keywords_weekly_TEMP successfully")
    # except Exception:
    #     logging.info("couldn't create table T_keywords_weekly_TEMP")
    # # Populate the staging table
    # sql_insert = "insert into T_keywords_weekly_TEMP select * from V_keywords_weekly where rank>=100"
    # try:
    #     stmt_insert = ibm_db.exec_immediate(conn, sql_insert)
    #     logging.info("insert into T_keywords_weekly_TEMP successfully")
    # except Exception:
    #     logging.info("couldn't insert into table T_keywords_weekly_TEMP")
    sql_select = "select * from T_keywords_weekly_TEMP where rank>=162"
    try:
        stmt_select = ibm_db.exec_immediate(conn, sql_select)
        logging.info("get the data from T_keywords_weekly_TEMP")
    except Exception:
        logging.info("couldn't get the data from T_keywords_weekly_TEMP")
    else:
        dictionary = ibm_db.fetch_both(stmt_select)
        while dictionary != False:
            logging.info('rank:' + str(dictionary[0]) + ' keywords:' + str(dictionary[1]))
            get_id(dictionary)
            dictionary = ibm_db.fetch_both(stmt_select)  # essential: without this the loop never advances
    # Update the keyword column (done via MERGE because of the encoding issue noted above)
    sql_update = '''
    MERGE INTO T_KEYWORDS_weekly as tkm
    USING T_keywords_weekly_TEMP as tkmt
    ON tkm.K_RANK=tkmt.RANK
    and tkm.week_YEAR=year(current date)||'-'||WEEK_ISO(current date)
    WHEN MATCHED
    THEN UPDATE SET tkm.KEYWORDS=tkmt.KEYWORDS
    ELSE IGNORE
    '''
    try:
        stmt_update = ibm_db.exec_immediate(conn, sql_update)
        logging.info("update the keywords")
    except Exception:
        logging.info("couldn't update the keywords")

    # sql_drop = "drop table T_keywords_weekly_TEMP"
    # try:
    #     stmt_drop = ibm_db.exec_immediate(conn, sql_drop)
    #     logging.info("drop table T_keywords_weekly_TEMP successfully")
    # except Exception:
    #     logging.info("couldn't drop table T_keywords_weekly_TEMP")
    #
    # ibm_db.close(conn)
    # logging.info("close the connect!")

    # Configure the notification email
    sender = 'tangxin2@meilele.com'
    receivers = ['tangxin2@meilele.com']
    SUBJECT = "Successfully update T_KEYWORDS_weekly"
    TEXT = '''
Dear miranda,
your Python script updating T_KEYWORDS_weekly has finished successfully.
Have a nice day!
'''
    # A blank line must separate the headers from the body
    message = """\
From: %s
To: %s
Subject: %s

%s
""" % (sender, ", ".join(receivers), SUBJECT, TEXT)
    try:
        smtpObj = smtplib.SMTP('mail.meilele.com', 25)
        smtpObj.sendmail(sender, receivers, message)
        logging.info("Successfully sent email")
    except Exception:
        logging.info("Error: unable to send email")
    print('finish')