Scraping every "Learn A Word" lesson (text and MP3 audio) from 51VOA with Python
The complete script (written for Python 2):

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2 script: scrape every "Learn A Word" lesson (text + mp3) from 51voa.com
import os
import re
import sys
import time
import urllib
from threading import Thread


class MyWorkThread(Thread, urllib.FancyURLopener):
    """Multi-threaded downloader: each thread fetches one byte range of the file."""
    def __init__(self, threadname, url, filename, ranges=(0, 0)):
        Thread.__init__(self, name=threadname)
        urllib.FancyURLopener.__init__(self)
        self.name = threadname
        self.url = url
        self.filename = filename
        self.ranges = ranges
        self.downloaded = 0

    def run(self):
        """Overrides Thread.run(); resumes from whatever part of this block is already on disk."""
        try:
            self.downloaded = os.path.getsize(self.filename)
        except OSError:
            self.downloaded = 0
        # rebuild the start point in case this block was partially downloaded before
        self.startpoint = self.ranges[0] + self.downloaded
        # this part is already complete
        if self.startpoint >= self.ranges[1]:
            print 'Part %s has been downloaded over.' % self.filename
            return
        self.oneTimeSize = 8 * 1024  # read 8 KB per call
        print 'task %s will download from %d to %d' % (self.name, self.startpoint, self.ranges[1])
        self.addheader('Range', 'bytes=%d-%d' % (self.startpoint, self.ranges[1]))
        self.urlhandle = self.open(self.url)
        data = self.urlhandle.read(self.oneTimeSize)
        while data:
            filehandle = open(self.filename, 'ab+')
            filehandle.write(data)
            filehandle.close()
            self.downloaded += len(data)
            data = self.urlhandle.read(self.oneTimeSize)


def GetUrlFileSize(url):
    """Return the Content-Length of the resource at url."""
    urlHandler = urllib.urlopen(url)
    headers = urlHandler.info().headers
    length = 0
    for header in headers:
        if header.find('Length') != -1:
            length = int(header.split(':')[-1].strip())
    return length


def SpliteBlocks(totalsize, blocknumber):
    """Split totalsize bytes into blocknumber (start, end) byte ranges."""
    blocksize = totalsize / blocknumber
    ranges = []
    for i in range(0, blocknumber - 1):
        ranges.append((i * blocksize, i * blocksize + blocksize - 1))
    ranges.append((blocksize * (blocknumber - 1), totalsize - 1))
    return ranges


def isLive(tasks):
    """True while any download thread is still running."""
    for task in tasks:
        if task.isAlive():
            return True
    return False


def downLoadFile(url, output, blocks=6):
    """Download url in `blocks` parallel byte ranges, then merge the parts into `output`."""
    sys.stdout.write('Begin to download from %s\n' % url)
    sys.stdout.flush()
    size = GetUrlFileSize(url)
    ranges = SpliteBlocks(size, blocks)
    threadname = ["thread_%d" % i for i in range(0, blocks)]
    filename = ["tmpfile_%d" % i for i in range(0, blocks)]
    tasks = []
    for i in range(0, blocks):
        task = MyWorkThread(threadname[i], url, filename[i], ranges[i])
        task.setDaemon(True)
        task.start()
        tasks.append(task)
    time.sleep(2)
    # report progress until every thread has finished
    while isLive(tasks):
        downloaded = sum([task.downloaded for task in tasks])
        process = downloaded / float(size) * 100
        show = u'\rFilesize: %d Downloaded: %d Completed: %.2f%%' % (size, downloaded, process)
        sys.stdout.write(show)
        sys.stdout.flush()
        time.sleep(1)
    # merge the temporary block files into the final output file
    output = formatFileName(output)
    filehandle = open(output, 'wb+')
    for i in filename:
        f = open(i, 'rb')
        filehandle.write(f.read())
        f.close()
        os.remove(i)
    filehandle.close()
    sys.stdout.write("Completed!\n")
    sys.stdout.flush()


def formatFileName(filename):
    """Replace characters that are illegal in file names with spaces."""
    if isinstance(filename, str):
        header, tail = os.path.split(filename)
        if tail != '':
            illegal_chars = ('\\', '/', ':', '*', '?', '"', '<', '>', '|')
            for char in illegal_chars:
                if tail.find(char) != -1:
                    tail = tail.replace(char, ' ')
            filename = os.path.join(header, tail)
        return filename
    else:
        return 'None'


def remove_tags(raw_html):
    """Strip all HTML tags from a string."""
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)
    return cleantext


def saveword(url, name):
    """Parse one Learn A Word page, append its text to LearningWord.txt and download its mp3."""
    res = urllib.urlopen(url)
    data = res.readlines()
    res.close()
    startag = r'id="mp3"'
    endtag = r'</div>'
    k = 80          # the markup we need only appears well past line 80 of the page
    data2 = ''      # line holding the mp3 link
    data3 = ''      # line holding the lesson text
    data4 = ''      # following <p> line, used when the text spans two lines
    while k < len(data) - 10:
        if data[k].find(startag) != -1:
            data2 = data[k]
        if data[k].find('<div id="content">') != -1:
            data3 = data[k]
            if data[k + 1].find('<p>') != -1:
                data4 = remove_tags(data[k + 1])
        k = k + 1
    mp3url = data2[data2.find('http'):data2.find(''' title="''') - 1]
    if data3.find(endtag) != -1:
        sent = data3[data3.find('今天我们要学'):data3.find(endtag)]
    else:
        sent = data3[data3.find('今天我们要学'):].strip('\n').strip('\r') + data4.strip('\n')
    sent = remove_tags(sent)
    f = open('LearningWord.txt', 'a+')
    f.write(name + '\n' + sent.strip('\r') + '\n')
    f.close()
    if data2.find(startag) != -1:
        downLoadFile(mp3url, str(formatFileName(name.replace(':', ' '))) + '.mp3', blocks=4)


def savepage(url):
    """Parse one index page and save every word entry linked from it."""
    res = urllib.urlopen(url)
    data = res.read()
    res.close()
    startag = '''<ul><li>'''
    endtag = '''</li></ul>'''
    data = str(data)
    data2 = data[data.find(startag) + 12:data.find(endtag)]
    linestart = 'href'
    meddle = '''" target'''
    lineend = '</a>'
    urls = []
    words = []
    i = data2.find(linestart)
    while i != -1:
        k = data2.find(meddle)
        j = data2.find(lineend)
        url = 'http://www.51voa.com/' + data2[i + 6:k]
        urls = urls + [url]
        word = data2[k + 16:j]
        print i, k, j, word, url
        words = words + [word]
        data2 = data2[j + 3:]
        saveword(url, word)
        i = data2.find(linestart)


# download every word: truncate the output file, then walk the index pages
f = open('LearningWord.txt', 'w')
f.close()
i = 53
while i <= 54:
    url = 'http://www.51voa.com/Learn_A_Word_' + str(i) + '.html'
    savepage(url)
    i = i + 1

# download a single specified word instead:
#url = "http://www.51voa.com/Voa_English_Learning/Learn_A_Word_21951.html"
#name ='9:pop up'
#saveword(url,name)
```
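The script above targets Python 2 (print statements, `urllib.FancyURLopener`). As a reference point, below is a minimal single-threaded sketch of the same per-page step in Python 3 using only the standard library. The regular expressions, the helper name `save_word_py3`, and the assumption that the page markup still uses `id="mp3"` and a fixed opening phrase are carried over from the parsing logic above, not taken from a current version of the site, so treat this as a sketch rather than a drop-in replacement.

```python
# -*- coding: utf-8 -*-
"""Rough Python 3 sketch of saveword(): single-threaded, standard library only.
The HTML markers (id="mp3", the closing </div>, the fixed opening phrase) are
assumptions based on the Python 2 parsing logic above and may no longer match
the current 51voa.com layout."""
import re
import urllib.request


def save_word_py3(page_url, name):
    html = urllib.request.urlopen(page_url).read().decode('utf-8', errors='ignore')
    # the mp3 link is assumed to sit near an element with id="mp3", as in the script above
    mp3_match = re.search(r'id="mp3".*?(http[^"\s]+\.mp3)', html, re.S)
    # every lesson body is assumed to start with the same fixed phrase; keep text up to </div>
    text_match = re.search(r'(今天我们要学.*?)</div>', html, re.S)
    if text_match:
        sent = re.sub(r'<.*?>', '', text_match.group(1))   # strip inner tags
        with open('LearningWord.txt', 'a', encoding='utf-8') as f:
            f.write(name + '\n' + sent.strip() + '\n')
    if mp3_match:
        safe_name = name.replace(':', ' ')                 # same file-name sanitizing idea
        urllib.request.urlretrieve(mp3_match.group(1), safe_name + '.mp3')


# example call (URL taken from the commented-out example at the end of the script):
# save_word_py3('http://www.51voa.com/Voa_English_Learning/Learn_A_Word_21951.html', 'pop up')
```

The `Range: bytes=start-end` trick used by `MyWorkThread` for multi-threaded downloading works the same way in Python 3 by passing a `Range` header to `urllib.request.Request`.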
Sample of the downloaded word text (the complete text file can be downloaded from http://pan.baidu.com/s/1o8pmojS):
- 2650 endorse
- 今天我们要学的词是 endorse. Endorse 作为动词,有支持的意思。Senator Ted Cruz endorsed Donald Trump, but later said the decision was “agonizing.” 美国联邦参议员克鲁兹支持川普,但是后来又表示,他做出这一决定十分痛苦。The New York Times endorsed Hillary Clinton for president in a Saturday editorial, and dismissed Donald Trump as “the worst nominee put forward by a major party in modern American history.” 纽约时报在星期六的社论中支持希拉里.克林顿当总统,并批评说,川普是“美国现代史上主要政党推举出的最差劲的候选人”。好的,我们今天学习的词是 endorse, endorse, endorse...
- 2649 deportation
- 今天我们要学的词是 deportation. Deportation 名词,驱逐出境,递解出境。The Obama administration said it would fully resume deportations of undocumented Haitian immigrants. 奥巴马政府表示,将全面恢复对无证海地移民的遣返工作。China and Canada have reached a new border agreement that would speed up the deportation of Chinese nationals inadmissible in Canada. 中国和加拿大达成新的边境协议,加快遣返那些本不该被允许进入加拿大的中国公民。好的,我们今天学习的词是 deportation, deportation, deportation...
- 2648 voluntarily
- 今天我们要学的词是 voluntarily. Voluntarily 副词,自愿地。The International Organization for Migrants says that more people are voluntarily returning to their home countries. 国际移民组织说,越来越多的人开始自愿返回自己的祖国。A high-tech diagnostic company voluntarily withdrew its Zika virus blood test from FDA approval. 一家高科技诊断公司自愿撤回递交美国食品药物管理局的寨卡病毒血液检测批准申请。好的,我们今天学习的词是 voluntarily, voluntarily, voluntarily...
- 2647 guerrilla
- 今天我们要学的词是 guerrilla. Guerrilla 形容词,游击队的。The Columbian government signed a peace agreement on Monday with the Revolutionary Armed Forces of Columbia (FARC), a national guerrilla movement. 哥伦比亚政府星期一跟全国游击队运动“哥伦比亚革命武装力量”签署了和平协议。The agreement needs to be approved by an Oct. 2 referendum before roughly 7,000 guerrilla fighters start their transition to civilian life. 这项协议还需经过10月2号全民公决批准,大约七千名游击队员才会开始向平民生活过渡。好的,我们今天学习的词是 guerrilla, guerrilla, guerrilla...
- 2646 curfew
- 今天我们要学的词是 curfew. Curfew 名词,宵禁。The city of Charlotte in North Carolina has lifted its midnight curfew, but the state of emergency remains in effect. 北卡罗来纳州夏洛特市取消了午夜宵禁,但是紧急状态依旧生效。Authorities in an Austrian city imposed a curfew on young immigrants following a series of sexual attacks at a local beer and wine festival. 奥地利一个城市的有关当局对未成年移民实施宵禁,此前当地一个啤酒葡萄酒节期间发生了一系列性侵事件。 好的,我们今天学习的词是 curfew, curfew, curfew...
- 2645 estimate
- 今天我们要学的词是 estimate. Estimate 动词,估计。A recent study estimates that the Indonesian forest fires that created a smoky haze last year may have caused more than 100,000 premature deaths. 一项最新研究估计,去年印尼山火引发的雾霾可能造成了10万人过早死亡。A new survey estimates that Americans own 265 million guns, but half of these guns are in the hands of only 3% of Americans. 最新调查估计,美国人拥有枪支总数2.65亿支,但其中半数都集中在3%的人手中。好的,我们今天学习的词是 estimate, estimate, estimate...
- 2644 mercy killing
- 今天我们要学的词是 mercy killing. Mercy killing 名词,安乐死。A terminally ill 17-year-old has become the first minor to be euthanized in Belgium since the age restrictions on such mercy killings were lifted in 2014. 比利时一个17岁绝症男孩安乐死,他是比利时2014年取消对安乐死年龄限制以来第一个安乐死的未成年人。The United Arab Emirates passed a new law banning all mercy killings. 阿联酋通过新法律,禁止安乐死。好的,我们今天学习的词是 mercy killing, mercy killing, mercy killing...