#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Python: 3.5
# Author: zhenghai.zhang@xxx.com
# Program: Crawl the names of all TV shows on Douban and write them into the database.
# Version: 0.1
# History: 2017.11.01

import requests, time, pymysql, re, datetime, json
from exchangelib import DELEGATE, Account, Credentials, Message, Mailbox, HTMLBody

host = 'xxx'
user = 'xxx'
passwd = 'xxx'
dbme = 'crawl'
dbtarget = 'back_brace'
table = 'tv_hotwords'
tabledelta = 'tv_hotwords_delta'
tablesync = 'slot_value'
port = 3306
tolist = ['zhenghai.zhang@xxx.com']


def get_tvs(urlbase, page):
    """Fetch one page of TV subjects from the Douban JSON endpoint."""
    tvs = None  # defined up front so the return below cannot raise NameError if the request fails
    try:
        url = urlbase + str(page)
        print(url)
        try:
            result = requests.get(url).text
            jresult = json.loads(result)
            tvs = jresult.get('subjects')
        except:
            print('Crawling ' + urlbase + str(page) + ' failed!')
        time.sleep(2)
        return tvs
    except:
        print('Failed to fetch the TV list for page %s' % page)
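
# Note: each page of the search_subjects endpoint is expected to come back as JSON roughly
# of the form {"subjects": [{"id": "12345", "title": "...", ...}]} -- inferred from the
# fields read above (only "id" and "title" are used); the values here are placeholders.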


def tv_insert(host, user, passwd, dbme, port, table, tvs_list):
    """Insert crawled shows into the hotwords table and return only the newly added ones."""
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme, port=port, charset="utf8")
    cur = conn.cursor()
    new_tvs = []
    # Full-width punctuation stripped from titles before insertion.
    punctuation = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.()::。· "
    for tv in tvs_list or []:  # get_tvs may return None on failure
        try:
            tv['title'] = re.sub(r"[%s]+" % punctuation, "", tv.get('title'))
            cmd = 'insert into %s(tv_id, tv_name) values("%s", "%s")' % (
                table, tv.get('id'), tv.get('title'))
            cur.execute(cmd)
            new_tvs.append(tv)
        except Exception:
            # Most commonly a duplicate-key error on tv_id.
            print(" " * 20, tv.get('title'), "already exists, skip...")
    cur.close()
    conn.commit()
    conn.close()
    return new_tvs
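
# The insert above interpolates ids and titles straight into the SQL text, so a title
# containing a double quote breaks the statement (and invites SQL injection). A minimal
# sketch of the same insert with pymysql parameter binding -- the table name still has to
# be formatted in, since placeholders only cover values:
#
#     cmd = 'insert into %s(tv_id, tv_name) values(%%s, %%s)' % table
#     cur.execute(cmd, (tv.get('id'), tv.get('title')))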


def tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta, tvs_list, tablesync):
    """Rewrite the delta table with this run's additions and sync them into the target slot table."""
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme, port=port, charset="utf8")
    cur = conn.cursor()
    cur.execute("delete from %s.%s" % (dbme, tabledelta))  # the delta table only holds this run's rows
    for tv in tvs_list:
        try:
            now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            cmd = 'insert into %s(tv_id, tv_name) values("%s", "%s")' % (tabledelta, tv['id'], tv['title'])
            cmdsync = ('insert into %s(slot_type_id, slot_value, create_by, modify_by, gmt_create, gmt_modify, out_value) '
                       'values("%s", "%s", "%s", "%s", "%s", "%s", "%s")'
                       % (dbtarget + "." + tablesync, "xxxxxx", tv['title'], "system", "system", now, now, ""))
            cur.execute(cmd)
            cur.execute(cmdsync)
        except pymysql.Error:
            print(" " * 20, tv['title'], "already exists, skip...")
    cur.close()
    conn.commit()
    conn.close()
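
# If per-row duplicate handling is not needed, the delta inserts could be batched into one
# round trip with executemany -- a sketch under the same schema assumptions as above:
#
#     cmd = 'insert into %s(tv_id, tv_name) values(%%s, %%s)' % tabledelta
#     cur.executemany(cmd, [(tv['id'], tv['title']) for tv in tvs_list])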


def tv_new_to_release(host, user, passwd, dbtarget, port):
    """Queue two release tasks so the newly synced slot values get published downstream."""
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbtarget, port=port, charset="utf8")
    cur = conn.cursor()
    try:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        cmdbacktoskill = ('insert into back_brace.release_task(app_type, app_status, type, ref_id, status, '
                          'register_id, create_by, modify_by, gmt_create, gmt_modify) '
                          'values("BACKBRACE", "testpass", "SLOT", "xxxxxx", "init", "SLOT_BACKBRACE_TESTPASS", '
                          '"zhenghai.zhang", "zhenghai.zhang", "%s", "%s")' % (now, now))
        cmdskilltoskillpro = ('insert into back_brace.release_task(app_type, app_status, type, ref_id, status, '
                              'register_id, create_by, modify_by, gmt_create, gmt_modify) '
                              'values("SKILL", "deploy", "SLOT", "xxxxxx", "init", "SLOT_SKILL_DEPLOY", '
                              '"zhenghai.zhang", "zhenghai.zhang", "%s", "%s")' % (now, now))
        print(cmdbacktoskill)
        cur.execute(cmdbacktoskill)
        print(cmdskilltoskillpro)
        cur.execute(cmdskilltoskillpro)
    except pymysql.Error:
        print("write into back_brace.release_task error!!!")
    cur.close()
    conn.commit()
    conn.close()


def Email(to, subject, body):
    """Send an HTML notification mail through an Exchange account."""
    creds = Credentials(
        username='xxxxxx',
        password='xxxxxx')
    account = Account(
        primary_smtp_address='xxx@xxx.com',
        credentials=creds,
        autodiscover=True,
        access_type=DELEGATE)
    m = Message(
        account=account,
        subject=subject,
        body=HTMLBody(body),
        to_recipients=[Mailbox(email_address=to)])
    m.send_and_save()
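
# send_and_save() also keeps a copy in the account's Sent Items folder; exchangelib's
# plain m.send() would deliver without saving. A minimal usage sketch (the credentials
# and addresses above are placeholders, so fill those in first):
#
#     Email(tolist[0], 'crawler test', 'hello<br>')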


if __name__ == '__main__':
    update_tvs = []
    pages = 25  # Douban only exposes the first 500 entries per TV category (25 pages x 20)
    # Categories: US, UK, Korean, Japanese, mainland-Chinese and Hong Kong dramas, Japanese animation, variety shows
    urlbaselist = ['https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BE%8E%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E8%8B%B1%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E9%9F%A9%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E5%9B%BD%E4%BA%A7%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%B8%AF%E5%89%A7&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E6%9C%AC%E5%8A%A8%E7%94%BB&sort=recommend&page_limit=20&page_start=',
                   'https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BB%BC%E8%89%BA&sort=recommend&page_limit=20&page_start=']
    for urlbase in urlbaselist:
        for i in range(pages):
            print("*" * 30, i, "*" * 30)
            tvs_list = get_tvs(urlbase, i * 20)
            new_tvs = tv_insert(host, user, passwd, dbme, port, table, tvs_list)
            for tv in new_tvs:
                print(tv['title'], "Added")
                update_tvs.append({"id": tv["id"], "title": tv["title"]})
            time.sleep(1)
    print(update_tvs)
    try:
        # Write this run's additions into the tv_hotwords_delta table and sync them to slot_value.
        tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta, update_tvs, tablesync)
    except Exception:
        print("tv update and sync Error!")
    try:
        tv_new_to_release(host, user, passwd, dbtarget, port)
    except Exception:
        print("tv_new_to_release error!!!")
    subject = 'TV shows newly added in this run'
    body = "TV shows newly added in this run:<hr>"
    for tv in update_tvs:
        body += tv['title'] + "<br>"
    for to in tolist:
        Email(to, subject, body)

Any pointers from more experienced folks would be much appreciated.
