Web page downloader

# coding:utf-8
import urllib2


class HtmlDownloader(object):
    """Downloads a page and returns its HTML, or None on failure."""

    def download(self, url):
        if url is None:
            return None
        user_agent = ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36')
        headers = {'User-Agent': user_agent}
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req)
        if response.getcode() == 200:
            return response.read()
        return None
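
The same downloader can also be built on the third-party requests library, which handles redirects and connection reuse for us. A minimal sketch, assuming requests is installed; the class name RequestsDownloader is ours, not part of the project:

# coding:utf-8
# Sketch of a requests-based equivalent of HtmlDownloader
# (illustrative; requests must be installed separately).
import requests


class RequestsDownloader(object):

    def download(self, url):
        if url is None:
            return None
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                                 'Chrome/63.0.3239.132 Safari/537.36'}
        r = requests.get(url, headers=headers, timeout=10)
        if r.status_code == 200:
            return r.text
        return None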

Web page parser

# coding:utf-8
import re
import json


class HtmlParser(object):

    # Extract movie detail-page URLs from the listing page source.
    def parser_url(self, page_url, response):
        pattern = re.compile(r'(http://movie.mtime.com/(\d+)/)')
        urls = pattern.findall(response)
        if urls:
            # Deduplicate the (url, movie_id) tuples.
            return list(set(urls))
        return None

    # Parse the asynchronous (JSON-P) response.
    def parser_json(self, page_url, response):
        # Extract the JSON payload between "=" and ";".
        pattern = re.compile(r'=(.*?);')
        result = pattern.findall(response)
        if not result:
            return None
        value = json.loads(result[0])
        try:
            isRelease = value.get('value').get('isRelease')
        except Exception, e:
            print e
            return None
        if isRelease:
            if value.get('value').get('releaseType') is None:
                return self._parser_release(page_url, value)
            else:
                # Records with a releaseType go through the no-release
                # parser with flag 2.
                return self._parser_no_release(page_url, value, isRelease=2)
        else:
            return self._parser_no_release(page_url, value)

    # Parse info for a movie that is currently in theaters.
    def _parser_release(self, page_url, value):
        try:
            isRelease = 1
            movieRating = value.get('value').get('movieRating')
            boxOffice = value.get('value').get('boxOffice')
            movieTitle = value.get('value').get('movieTitle')
            RPictureFinal = movieRating.get('RPictureFinal')
            RStoryFinal = movieRating.get('RStoryFinal')
            RDirectorFinal = movieRating.get('RDirectorFinal')
            ROtherFinal = movieRating.get('ROtherFinal')
            RatingFinal = movieRating.get('RatingFinal')

            MovieId = movieRating.get('MovieId')
            Usercount = movieRating.get('Usercount')
            AttitudeCount = movieRating.get('AttitudeCount')

            TotalBoxOffice = boxOffice.get('TotalBoxOffice')
            TotalBoxOfficeUnit = boxOffice.get('TotalBoxOfficeUnit')
            TodayBoxOffice = boxOffice.get('TodayBoxOffice')
            TodayBoxOfficeUnit = boxOffice.get('TodayBoxOfficeUnit')

            ShowDays = boxOffice.get('ShowDays')
            # Rank may be absent from the payload; default to 0.
            Rank = boxOffice.get('Rank', 0)

            return (MovieId, movieTitle, RatingFinal, ROtherFinal,
                    RPictureFinal, RDirectorFinal, RStoryFinal, Usercount,
                    AttitudeCount,
                    TotalBoxOffice + TotalBoxOfficeUnit,
                    TodayBoxOffice + TodayBoxOfficeUnit,
                    Rank, ShowDays, isRelease)
        except Exception, e:
            print e, page_url, value
            return None

    # Parse info for a movie that has not been released yet.
    def _parser_no_release(self, page_url, value, isRelease=0):
        try:
            movieRating = value.get('value').get('movieRating')
            movieTitle = value.get('value').get('movieTitle')
            RPictureFinal = movieRating.get('RPictureFinal')
            RStoryFinal = movieRating.get('RStoryFinal')
            RDirectorFinal = movieRating.get('RDirectorFinal')
            ROtherFinal = movieRating.get('ROtherFinal')
            RatingFinal = movieRating.get('RatingFinal')

            MovieId = movieRating.get('MovieId')
            Usercount = movieRating.get('Usercount')
            AttitudeCount = movieRating.get('AttitudeCount')

            Rank = 0
            return (MovieId, movieTitle, RatingFinal, ROtherFinal,
                    RPictureFinal, RDirectorFinal, RStoryFinal, Usercount,
                    AttitudeCount, u'N/A', u'N/A', Rank, 0, isRelease)
        except Exception, e:
            print e, page_url, value
            return None
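
The JSON-P response the Mtime API returns has the shape `var xxx = {...};`, which is why the regex pulls out the text between "=" and ";". A quick self-check of parser_json with a hand-made payload; every field value below is made up for illustration:

# Hand-made JSON-P payload for exercising parser_json; all values invented.
if __name__ == '__main__':
    fake = ('var result = {"value": {"isRelease": false, "movieTitle": "Demo", '
            '"movieRating": {"MovieId": 1, "RatingFinal": 8.0, "ROtherFinal": 8.0, '
            '"RPictureFinal": 8.0, "RDirectorFinal": 8.0, "RStoryFinal": 8.0, '
            '"Usercount": 10, "AttitudeCount": 5}}};')
    print HtmlParser().parser_json(None, fake)
    # -> (1, u'Demo', 8.0, 8.0, 8.0, 8.0, 8.0, 10, 5, u'N/A', u'N/A', 0, 0, 0)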

Data storage

# coding:utf-8
import MySQLdb


class DataOutput(object):

    def __init__(self):
        self.con = MySQLdb.connect(host='127.0.0.1', user='root', passwd='',
                                   db='go', port=3306, charset='utf8')
        self.cx = self.con.cursor()
        self.create_table('MTime')
        self.datas = []

    def create_table(self, table_name):
        # `Rank` is backquoted because it is a reserved word in newer MySQL versions.
        values = "id int(11) not null primary key auto_increment," \
                 "MovieId int(11)," \
                 "MovieTitle varchar(40) NOT NULL," \
                 "RatingFinal double NOT NULL DEFAULT 0.0," \
                 "ROtherFinal double NOT NULL DEFAULT 0.0," \
                 "RPictureFinal double NOT NULL DEFAULT 0.0," \
                 "RDirectorFinal double NOT NULL DEFAULT 0.0," \
                 "RStoryFinal double NOT NULL DEFAULT 0.0," \
                 "Usercount int(11) NOT NULL DEFAULT 0," \
                 "AttitudeCount int(11) NOT NULL DEFAULT 0," \
                 "TotalBoxOffice varchar(20) NOT NULL," \
                 "TodayBoxOffice varchar(20) NOT NULL," \
                 "`Rank` int(11) NOT NULL DEFAULT 0," \
                 "ShowDays int(11) NOT NULL DEFAULT 0," \
                 "isRelease int(11) NOT NULL"
        self.cx.execute('CREATE TABLE IF NOT EXISTS %s(%s) ENGINE=InnoDB DEFAULT CHARSET=utf8'
                        % (table_name, values))

    # Buffer a record; flush to the database once more than 10 accumulate.
    def store_data(self, data):
        if data is None:
            return
        self.datas.append(data)
        if len(self.datas) > 10:
            self.output_db('MTime')

    def output_db(self, table_name):
        for data in self.datas:
            self.cx.execute("INSERT INTO " + table_name + " (MovieId,MovieTitle,RatingFinal,"
                            "ROtherFinal,RPictureFinal,RDirectorFinal,RStoryFinal,Usercount,"
                            "AttitudeCount,TotalBoxOffice,TodayBoxOffice,`Rank`,ShowDays,isRelease) "
                            "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", data)
        # Clear the buffer after the loop instead of removing items while
        # iterating, and keep the connection open for later flushes.
        self.datas = []
        self.con.commit()

    # Flush any remaining records, then release the cursor and connection.
    def output_end(self):
        if len(self.datas) > 0:
            self.output_db('MTime')
        self.cx.close()
        self.con.close()
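
Flushing the buffer row by row costs one round trip per record. MySQLdb cursors also offer executemany, which sends the whole batch in a single call. A sketch of an alternative flush helper; flush_batch is a name we made up, not part of the project:

# Sketch: batch flush via cursor.executemany (one round trip for the
# whole buffer). flush_batch is illustrative, not the project's code.
def flush_batch(cursor, connection, rows):
    sql = ("INSERT INTO MTime (MovieId,MovieTitle,RatingFinal,ROtherFinal,"
           "RPictureFinal,RDirectorFinal,RStoryFinal,Usercount,AttitudeCount,"
           "TotalBoxOffice,TodayBoxOffice,`Rank`,ShowDays,isRelease) "
           "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    cursor.executemany(sql, rows)
    connection.commit()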

Spider scheduler

# coding:utf-8
from DataOutput import DataOutput
from HtmlDownloader import HtmlDownloader
from HtmlParser import HtmlParser
import time


class SpiderMan(object):

    def __init__(self):
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        # Download the theater listing page and collect movie detail URLs.
        content = self.downloader.download(root_url)
        urls = self.parser.parser_url(root_url, content)

        for url in urls:
            try:
                # Timestamp parameter; the literal "3282" suffix is kept
                # from the original request format.
                t = time.strftime("%Y%m%d%H%M%S3282", time.localtime())
                rank_url = "http://service.library.mtime.com/Movie.api?" \
                           "Ajax_CallBack=true" \
                           "&Ajax_CallBackType=Mtime.Library.Services" \
                           "&Ajax_CallBackMethod=GetMovieOverviewRating" \
                           "&Ajax_CrossDomain=1" \
                           "&Ajax_RequestUrl=%s" \
                           "&t=%s" \
                           "&Ajax_CallBackArgument0=%s" % (url[0], t, url[1])
                rank_content = self.downloader.download(rank_url)
                data = self.parser.parser_json(rank_url, rank_content)
                self.output.store_data(data)
            except Exception, e:
                print e
        self.output.output_end()
        print "Crawl finish"


if __name__ == '__main__':
    spider = SpiderMan()
    spider.crawl('http://theater.mtime.com/China_Beijing/')
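
The detail-page URL is interpolated into the Ajax_RequestUrl query parameter verbatim, which the API accepts as-is. If escaping ever becomes necessary, the URL can be percent-encoded first; a defensive sketch, not something the original code does:

# Optional: percent-encode the detail URL before interpolation
# (defensive sketch; the API accepts the raw URL in practice).
import urllib

encoded = urllib.quote('http://movie.mtime.com/108737/', safe='')
print encoded
# -> http%3A%2F%2Fmovie.mtime.com%2F108737%2F  (movie id is a made-up example)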
