Python3.5爬取豆瓣电视剧数据并且同步到MySQL中
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Python: 3.5
# Author: zhenghai.zhang@xxx.com
# Program: 爬取豆瓣网站上所有电视剧的名称并写入数据库。
# Version: 0.1
# History: 2017.11.01

import requests
import time
import pymysql
import re
import datetime
import json

from exchangelib import DELEGATE, Account, Credentials, Message, Mailbox, HTMLBody

# --- MySQL connection settings (placeholders; fill in real values) ---
host = 'xxx'
user = 'xxx'
passwd = 'xxx'
dbme = 'crawl'               # database holding the crawl tables
dbtarget = 'back_brace'      # database the new titles are synced into
table = 'tv_hotwords'              # full list of crawled TV shows
tabledelta = 'tv_hotwords_delta'   # only the shows added by the current run
tablesync = 'slot_value'           # target table inside dbtarget
port = 3306

# Recipients of the "newly added TV shows" notification mail.
tolist = ['zhenghai.zhang@xxx.com']
def get_tvs(urlbase, page):
    """Fetch one page of TV-show subjects from the douban JSON API.

    Parameters
    ----------
    urlbase : str
        Search URL ending in ``page_start=``; the offset is appended to it.
    page : int
        Page offset (a multiple of the page size, 20) appended to ``urlbase``.

    Returns
    -------
    list | None
        The ``subjects`` list from the JSON response, or ``None`` when the
        request or the JSON decoding fails (callers must handle ``None``).
    """
    url = urlbase + str(page)
    print(url)
    try:
        result = requests.get(url).text
        jresult = json.loads(result)
        # 'subjects' holds the list of shows for this page.
        return jresult.get('subjects')
    except (requests.RequestException, ValueError):
        # Network error or malformed JSON: report, back off briefly, give up.
        # (The original's `return tvs` here raised NameError on the unbound
        # local, silently swallowed by an outer bare except.)
        print('爬取' + url + '失败!')
        time.sleep(2)
        print('获取第%s页电影列表失败' % page)
        return None
def tv_insert(host, user, passwd, dbme, port, table, tvs_list):
    """Insert crawled shows into ``table``; return only the newly added ones.

    Titles are stripped of punctuation before insertion.  A show whose id
    already exists raises a duplicate-key error and is skipped, so the
    returned list contains exactly the shows added by this run.

    Parameters mirror the module-level MySQL settings; ``tvs_list`` is the
    (possibly ``None``) list returned by ``get_tvs``.
    """
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme,
                           port=port, charset="utf8")
    cur = conn.cursor()
    new_tvs = []
    # Punctuation (CJK and ASCII) removed from titles before storing.
    punctuation = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.()::。· "
    # Compile once outside the loop; re.escape keeps ']' and '\' literal
    # inside the character class.
    strip_punct = re.compile("[%s]+" % re.escape(punctuation))
    # ``or []`` guards against get_tvs returning None on a failed fetch.
    for tv in tvs_list or []:
        try:
            tv['title'] = strip_punct.sub("", tv.get('title'))
            # The table name cannot be bound as a parameter; the values are
            # bound to avoid SQL injection via crawled titles.
            cmd = 'insert into %s(tv_id, tv_name) values(%%s, %%s)' % table
            cur.execute(cmd, (tv.get('id'), tv.get('title')))
            new_tvs.append(tv)
        except pymysql.Error:
            # Duplicate key (or other DB error) for this row: skip it.
            print(" " * 20, tv.get('title'), "already exists, skip……")
    cur.close()
    conn.commit()
    conn.close()
    return new_tvs
def tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta, tvs_list, tablesync):
    """Rewrite the delta table with this run's new shows and sync slot values.

    Clears ``dbme.tabledelta``, inserts every show in ``tvs_list`` into it,
    and mirrors each title into ``dbtarget.tablesync``.  Duplicate rows are
    reported and skipped; the transaction is committed at the end.
    """
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme,
                           port=port, charset="utf8")
    cur = conn.cursor()
    # The delta table only ever holds the newest batch.
    # (Original built this via `"delete from %s " % dbme+"."+tabledelta`,
    # relying on %/+ precedence to produce "delete from crawl .<table>".)
    cur.execute("delete from %s.%s" % (dbme, tabledelta))
    # One timestamp for the whole batch so gmt_create == gmt_modify.
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    for tv in tvs_list:
        try:
            # Table names cannot be bound as parameters; values are bound
            # to avoid SQL injection via crawled titles.
            cur.execute(
                'insert into %s(tv_id, tv_name) values(%%s, %%s)' % tabledelta,
                (tv['id'], tv['title']))
            cur.execute(
                'insert into %s.%s(slot_type_id, slot_value, create_by, modify_by,'
                ' gmt_create, gmt_modify, out_value)'
                ' values(%%s, %%s, %%s, %%s, %%s, %%s, %%s)' % (dbtarget, tablesync),
                ("xxxxxx", tv['title'], "system", "system", now, now, ""))
        except pymysql.Error:
            print(" " * 20, tv['title'], "already exists, skip……")
    cur.close()
    conn.commit()
    conn.close()
def tv_new_to_release(host, user, passwd, dbtarget, port):
    """Queue the two release tasks that publish the updated slot values.

    Inserts one BACKBRACE/testpass task and one SKILL/deploy task into
    ``back_brace.release_task``.  Errors are reported but not raised.
    """
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbtarget,
                           port=port, charset="utf8")
    cur = conn.cursor()
    # Single timestamp shared by both rows and both gmt columns; the
    # original called datetime.now() four times, which could straddle a
    # second boundary and make gmt_create != gmt_modify.
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    cmdbacktoskill = 'insert into back_brace.release_task(app_type,app_status,type,ref_id,status,register_id,create_by,modify_by,gmt_create,gmt_modify) values("BACKBRACE","testpass","SLOT","xxxxxx","init","SLOT_BACKBRACE_TESTPASS" ,"zhenghai.zhang","zhenghai.zhang","%s","%s")' % (now, now)
    cmdskilltoskillpro = 'insert into back_brace.release_task(app_type,app_status,type,ref_id,status,register_id,create_by,modify_by,gmt_create,gmt_modify) values("SKILL","deploy","SLOT","xxxxxx","init","SLOT_SKILL_DEPLOY" ,"zhenghai.zhang","zhenghai.zhang","%s","%s")' % (now, now)
    try:
        print(cmdbacktoskill)
        cur.execute(cmdbacktoskill)
        print(cmdskilltoskillpro)
        cur.execute(cmdskilltoskillpro)
    except pymysql.Error:
        print("write into back_brace.release_task error!!!")
    cur.close()
    conn.commit()
    conn.close()
def Email(to, subject, body):
    """Send *body* as an HTML mail with *subject* to the address *to*.

    Uses a fixed Exchange account (placeholder credentials) via exchangelib
    autodiscovery; the message is sent and saved to the Sent folder.
    """
    # Authenticate against Exchange with the service account.
    service_creds = Credentials(username='xxxxxx', password='xxxxxx')
    sender = Account(primary_smtp_address='xxx@xxx.com',
                     credentials=service_creds,
                     autodiscover=True,
                     access_type=DELEGATE)
    # Build and dispatch the HTML message to the single recipient.
    mail = Message(account=sender,
                   subject=subject,
                   body=HTMLBody(body),
                   to_recipients=[Mailbox(email_address=to)])
    mail.send_and_save()
if __name__ == '__main__':
    update_tvs = []  # shows newly added across all categories this run
    pages = 25  # 豆瓣每项电视剧只有前500部 (25 pages * 20 per page)
    # Category search endpoints: US, UK, Korean, Japanese dramas, domestic,
    # HK dramas, Japanese animation, variety shows.  Each URL ends in
    # ``page_start=`` so the numeric offset can be appended.
    # 美剧 英剧 韩剧 日剧 国产剧 港剧 日本动画 综艺
    urlbaselist = ['https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BE%8E%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E8%8B%B1%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E9%9F%A9%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E5%9B%BD%E4%BA%A7%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%B8%AF%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E6%9C%AC%E5%8A%A8%E7%94%BB&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BB%BC%E8%89%BA&sort=recommend&page_limit=20&page_start=']
    for urlbase in urlbaselist:
        for i in range(pages):
            print("*" * 30, i, "*" * 30)
            tvs_list = get_tvs(urlbase, i * 20)
            new_tvs = tv_insert(host, user, passwd, dbme, port, table, tvs_list)
            for tv in new_tvs:
                print(tv['title'], "Added")
                # Keep only the fields the later stages need.
                update_tvs.append({"id": tv["id"], "title": tv["title"]})
            time.sleep(1)  # throttle requests to douban
    print(update_tvs)
    try:
        # 将增加的电视剧写入delta表中 and mirror them into the slot table.
        tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta,
                        update_tvs, tablesync)
    except Exception:
        print("tv update and sync Error!")
    try:
        tv_new_to_release(host, user, passwd, dbtarget, port)
    except Exception:
        print("tv_new_to_release error!!!")
    # Notification mail listing every show added by this run.
    subject = '本次新增电视剧名称'
    # Fixed: original body said "电影" (movies), inconsistent with the
    # subject and the program's purpose (电视剧, TV shows).
    body = "本次新增的电视剧名称为:<hr>"
    for tv in update_tvs:
        body += tv['title'] + "<br>"
    for to in tolist:
        Email(to, subject, body)
还请各位大侠指点
Python3.5爬取豆瓣电视剧数据并且同步到mysql中的更多相关文章
- Python3.5爬取cbooo.cn数据并且同步到mysql中
#!/usr/local/bin/python # -*- coding: utf-8 -*- # Python: 3.5 # Author: wucl(),zhenghai.zhang # Prog ...
- Scrapy 通过登录的方式爬取豆瓣影评数据
Scrapy 通过登录的方式爬取豆瓣影评数据 爬虫 Scrapy 豆瓣 Fly 由于需要爬取影评数据在来做分析,就选择了豆瓣影评来抓取数据,工具使用的是Scrapy工具来实现.scrapy工具使用起来 ...
- 【python数据挖掘】爬取豆瓣影评数据
概述: 爬取豆瓣影评数据步骤: 1.获取网页请求 2.解析获取的网页 3.提取数据 4.保存文件 源代码: # 1.导入需要的库 import urllib.request from bs4 impo ...
- python系列之(3)爬取豆瓣图书数据
上次介绍了beautifulsoup的使用,那就来进行运用下吧.本篇将主要介绍通过爬取豆瓣图书的信息,存储到sqlite数据库进行分析. 1.sqlite SQLite是一个进程内的库,实现了自给自足 ...
- python爬虫-爬取豆瓣电影数据
#!/usr/bin/python# coding=utf-8# 作者 :Y0010026# 创建时间 :2018/12/16 16:27# 文件 :spider_05.py# IDE :PyChar ...
- node 爬虫 --- 将爬取到的数据,保存到 mysql 数据库中
步骤一:安装必要模块 (1)cheerio模块 ,一个类似jQuery的选择器模块,分析HTML利器. (2)request模块,让http请求变的更加简单 (3)mysql模块,node连接mysq ...
- 利用selenium 爬取豆瓣 武林外传数据并且完成 数据可视化 情绪分析
全文的步骤可以大概分为几步: 一:数据获取,利用selenium+多进程(linux上selenium 多进程可能会有问题)+kafka写数据(linux首选必选耦合)windows直接采用的是写my ...
- Scrapy爬取豆瓣图书数据并写入MySQL
项目地址 BookSpider 介绍 本篇涉及的内容主要是获取分类下的所有图书数据,并写入MySQL 准备 Python3.6.Scrapy.Twisted.MySQLdb等 演示 代码 一.创建项目 ...
- python3 爬虫---爬取豆瓣电影TOP250
第一次爬取的网站就是豆瓣电影 Top 250,网址是:https://movie.douban.com/top250?start=0&filter= 分析网址'?'符号后的参数,第一个参数's ...
随机推荐
- 关于JAVA路径 问题
1.基本概念的理解 绝对路径:绝对路径就是你的主页上的文件或目录在硬盘上真正的路径,(URL和物理路径)例如: C:\xyz\test.txt 代表了test.txt文件的绝对路径.http://w ...
- 【LeetCode】210. Course Schedule II
Course Schedule II There are a total of n courses you have to take, labeled from 0 to n - 1. Some co ...
- inotify-tools命令使用讲解
inotify-tools 是为linux下inotify文件监控工具提供的一套c的开发接口库函数,同时还提供了一系列的命令行工具,这些工具可以用来监控文件系统的事件. inotify-tools是用 ...
- Nginx 在ubuntu14.04下的安装
来源:http://blog.csdn.net/hanshileiai/article/details/45580001 按照步骤一步一步来,绝对ok 1.如果出现错误: *4 connect() t ...
- Magento EAV模型
网址:http://www.ruiwant.com/magento-for-dev-part-7-advanced-orm-entity-attribute-value.html
- 安卓高手之路之ClassLoader(三)
由于看C++和C代码看得很累,很辛苦.上一章终于解脱到java代码中来了. 第一个getClassLoader发生在main的preload方法中, public static void main(S ...
- 【MySQL】MySQL之MySQL5.7中文乱码
自己的MySQL服务器不能添加中文,于是自己使用 show variables like 'character%'; 查看了当前的编码格式 我又通过以下方法将其设置为utf-8 SET charact ...
- HANA初印象
今天手懒,看了一些SAP HANA 的一些外文介绍,不翻译了,直接剽窃过来,供参考. 1. HANA 是什么东西? “HANA doesn't actually mean anything, but ...
- MySQL中 optimize table '表名'的作用
语法: optimize table '表名' 一,原始数据 1,数据量 2,存放在硬盘中的表文件大小 3,查看一下索引信息 索引信息中的列的信息说明. Table :表的名称.Non_unique: ...
- 【转载并记录】SpringBoot 入门(一)
https://blog.csdn.net/dhklsl/article/details/80309999 https://www.cnblogs.com/zheting/p/6707035.html ...