#encoding=utf-8
import requests
from bs4 import BeautifulSoup
import re
import os
from aria2rpc import rpc_addUri
class Cntv():
    def openUrl(self, url):
        """
        This method is used to open a web site
        :param url: web site to request
        :return: requested Response object
        """
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
        }
        # headers must be passed by keyword: the second positional
        # argument of requests.get() is params, not headers
        response = requests.get(url, headers=header)
        return response
    def getEachEpisodeUrl(self):
        """
        Get the page address of each episode of the TV play
        :return: list of episode page URLs
        """
        urls = []
        url = "http://tv.cctv.com/2014/07/07/VIDA1404730290373811.shtml"
        response = self.openUrl(url)
        html = response.content.decode('utf-8')
        soup = BeautifulSoup(html, 'html.parser')
        title = soup.select(".text_mod h3")
        print(title[0].text)
        episodes = soup.select('.img a')
        # every episode is linked three times on the listing page,
        # so step through the anchors three at a time
        for each in range(1, len(episodes), 3):
            print(episodes[each]['title'], "link:" + episodes[each]['href'])
            urls.append(episodes[each]['href'])
        print("Finished fetching the episode URLs!")
        return urls
    def getEachDLUrl(self):
        """
        Build the getHttpVideoInfo.do API link for each episode
        :return: list of API links
        """
        urls = self.getEachEpisodeUrl()
        links = []
        for num, url in enumerate(urls):
            response = self.openUrl(url)
            html = response.text
            # the video id is embedded in each episode page as: guid = "...";
            match = re.search(r'guid = "(\w+?)";', html)
            pid = match.group(1)
            link = "http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do?pid=%s&tz=%s&from=%s&url=%s&idl=%s&idlr=%s&modifyed=%s" % (pid, '-8', '000news', url, '', '', 'false')
            links.append(link)
            print("Fetched episode %d" % (num + 1))
        return links
    def getDLList(self):
        """
        Get the download addresses for each episode of the TV play
        :return: download address list
        """
        links = self.getEachDLUrl()
        # example of a complete API link:
        # links = ["http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do?pid=59381a0e55404cf5b101f7d3bcad2da8&tz=-8&from=000news&url=http://tv.cctv.com/2014/07/15/VIDE1405435161521590.shtml&idl=32&idlr=32&modifyed=false"]
        dl_urls = []
        for link in links:
            dl_url = []
            response = self.openUrl(link)
            # each episode is split into several video segments, listed under
            # ['video']['chapters4']; a lower-quality stream is available
            # under ['video']['lowChapters']
            dl_list = response.json()['video']['chapters4']
            for each in range(len(dl_list)):
                downloadurl = dl_list[each]['url']
                dl_url.append(downloadurl)
                print(downloadurl)
            dl_urls.append(dl_url)
        return dl_urls
    def _add_aria2_task(self, url, name):
        """
        Queue a download in aria2 via its RPC interface
        :param url: download url
        :param name: output file name
        :return: RPC result, or None on failure
        """
        try:
            result = rpc_addUri(url, {'out': name})
            return result
        except Exception as e:
            print(e)
            return None
"""
def dlTv(self): dl_urls_list = self.getDLList()
if os.path.exists("tv_list") == False:
os.mkdir("tv_list")
os.chdir("tv_list")
for dl_urls in dl_urls_list:
for dl_url in dl_urls:
print("download" + dl_url)
# response = self.openUrl(dl_url)
# with open("first.mp4",'ab') as tl:
# tl.write(response.content)
print("-"*20)
"""
if __name__ == "__main__":
cm = Cntv()
# cm.getUrl()
# cm.openUrl() lists = cm.getDLList()
for num,list in enumerate(lists):
for i,url in enumerate(list):
cm._add_aria2_task(url, str(num+1)+'_'+str(i+1)+'.mp4')
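
The rpc_addUri helper is imported from a local aria2rpc module that is not shown in this post. A minimal sketch of what such a helper could look like, assuming aria2c is running locally with RPC enabled on the default port 6800 and no secret token (the endpoint URL and module layout here are assumptions, not the author's original code):

# aria2rpc.py -- minimal sketch; assumes aria2c was started with
#   aria2c --enable-rpc --rpc-listen-port=6800   (no secret token)
import json
import requests

ARIA2_RPC_URL = "http://localhost:6800/jsonrpc"  # assumed endpoint

def rpc_addUri(url, options=None):
    # aria2.addUri takes a list of URIs plus an options dict,
    # e.g. {'out': '1_1.mp4'} to set the output file name
    payload = {
        "jsonrpc": "2.0",
        "id": "cntv",
        "method": "aria2.addUri",
        "params": [[url], options or {}],
    }
    response = requests.post(ARIA2_RPC_URL, data=json.dumps(payload))
    # on success aria2 returns the GID of the newly queued download
    return response.json()["result"]

With aria2c running, each URL queued by _add_aria2_task then shows up in aria2 as a download named <episode>_<segment>.mp4.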
