#!/usr/bin/env python
# encoding: utf-8
import os
import re
import sys

from pptx import Presentation
from pptx.util import Inches
import PIL.Image

from util import request_url

class Crawler(object):
    def __init__(self):
        self.main_url = "https://mp.weixin.qq.com/s?__biz=MzA3NzIwMDM3OA==&mid=209853452&idx=1&sn=bd40e9622dca2e5bd52af08bbf870861&pass_ticket=8MmcYuwV6RkFHjUHOnxmzVg%2FEhQYTM26Zg%2FO2ZpgJVGyL6ewBt5fJc%2BEsNkytOiN"
        # Patterns that pick the article body, item links, title, and
        # image attributes out of the WeChat article HTML.
        self.media_content_pattern = re.compile('<div class="rich_media_content " id="js_content">.*?</div>', re.S)
        self.item_pattern = re.compile('<p><a href="(.*?)" target=', re.S)
        self.title_pattern = re.compile('<h2 class="rich_media_title" id="activity-name">(.*?)</h2>', re.S)
        self.elements_content_pattern = re.compile('<p style=.*?</p>', re.S)
        self.png_pattern = re.compile('data-src="(.*?)"', re.S)
        self.datatype_pattern = re.compile('data-type="(.*?)"', re.S)
        self.text_pattern = re.compile('>(.*?)</', re.S)
        self.picid_pattern = re.compile('http://mmbiz.qpic.cn/mmbiz/(.*?)/', re.S)
        self.pic_fmt_pattern = re.compile('wx_fmt=(.*?)$', re.S)
        self.data_path = "../data/"
        self.ppt_path = "../ppt/"
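
    # Example of what the two image-URL patterns capture (illustrative URL,
    # not from the original post): for
    #   http://mmbiz.qpic.cn/mmbiz/abc123/0?wx_fmt=jpeg
    # picid_pattern captures "abc123" and pic_fmt_pattern captures "jpeg".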

    def get_item_list(self):
        ret, main_page = request_url(self.main_url)
        if ret == -1 or main_page == "":
            print "Request main page failed!"
            return
        info = self.media_content_pattern.findall(main_page)
        media_content = info[0] if info else ""
        item_list = []
        if media_content:
            item_info = self.item_pattern.findall(main_page)
            for item_url in item_info:
                print item_url
                item_list.append(item_url)
                title = self.get_item(item_url)
                # Links in the page are HTML-escaped; retry with "&" restored.
                if title == "" and item_url.find("&amp;") != -1:
                    item_url = item_url.replace("&amp;", "&")
                    title = self.get_item(item_url)
        return item_list

    def get_item(self, item_url):
        '''
        Fetch one article, save its text and resized pictures, and build
        a PPT. Returns the title on success, "" when no title is found,
        "exist" when the PPT already exists, or "bad" on request failure.
        '''
        ret, item_page = request_url(item_url)
        if ret == -1 or item_page == "":
            print "Request item page failed! %s" % item_url
            return "bad"

        info = self.title_pattern.findall(item_page)
        title = info[0].strip().replace("(ppt)", "") if info else ""
        if title == "":
            print "Title is empty! %s" % item_url
            return ""
        item_path = self.data_path + title
        ppt_file = self.ppt_path + title + ".pptx"
        if os.path.exists(ppt_file):
            return "exist"

        info = self.media_content_pattern.findall(item_page)
        media_content = info[0] if info else ""
        if media_content == "":
            return ""

        # Walk the <p> elements of the article body and classify each one
        # as a picture (it carries a data-src URL) or a text snippet.
        element_tuple_list = []
        for element_content in self.elements_content_pattern.findall(media_content):
            if element_content.find('data-src="http://') != -1:
                element_type = "png"
            else:
                element_type = "text"
            element_data = ""
            if element_type == "png":
                pics = self.png_pattern.findall(element_content)
                if pics:
                    element_data = pics[0]
                # Some image URLs lack wx_fmt; recover the format from the
                # data-type attribute instead, or drop the element.
                if element_data.find("wx_fmt") == -1:
                    types = self.datatype_pattern.findall(element_content)
                    if types:
                        element_data += "?wx_fmt=%s" % types[0]
                    else:
                        element_data = ""
            else:
                texts = self.text_pattern.findall(element_content)
                # Paragraphs that split into many fragments are markup
                # noise rather than article text; skip them.
                if len(texts) > 3:
                    element_data = ""
                else:
                    element_data = "\n".join(texts)
            if element_data:
                element_tuple_list.append((element_type, element_data))

        if element_tuple_list:
            if not os.path.exists(item_path):
                os.makedirs(item_path)
            text_data = ""
            picfile_list = []
            for element_type, element_data in element_tuple_list:
                if element_type == "text":
                    text_data += element_data
                else:
                    picfile = self.download_pic(element_data, item_path)
                    if picfile != "":
                        picfile_list.append(picfile)
            self.write_text_content(text_data, item_path)
            self.create_ppt(title, ppt_file, picfile_list)
        return title
    def download_pic(self, url, path):
        ret, pic_page = request_url(url)
        if ret == -1 or pic_page == "":
            return ""
        info = self.picid_pattern.findall(url)
        if info and info[0] != "":
            picid = info[0]
        else:
            # Fall back to a filesystem-safe name derived from the URL.
            picid = url.replace("/", "_").replace(":", "_").replace(".", "_").replace("?", "_")
        info = self.pic_fmt_pattern.findall(url)
        if info and info[0] != "":
            fmt = info[0].split("&")[0].split("?")[0]
        else:
            print "Get pic fmt failed! %s" % url
            return ""
        filename_bak = self.data_path + picid + "_bak.%s" % fmt

        # Write the raw bytes, then resize into the PPT-friendly 720x540 box.
        fp = open(filename_bak, "wb")
        fp.write(pic_page)
        fp.close()
        pil_image = PIL.Image.open(filename_bak)
        w, h = pil_image.size
        w_box = 720
        h_box = 540
        filename = path + "/" + picid + ".%s" % fmt
        self.resize(w, h, w_box, h_box, pil_image, filename)
        os.remove(filename_bak)
        return filename

    def resize(self, w, h, w_box, h_box, pil_image, outfile):
        '''
        Resize a pil_image object so it fits inside a w_box x h_box box
        while retaining the aspect ratio, then save it to outfile.
        '''
        f1 = 1.0 * w_box / w  # 1.0 forces float division in Python 2
        f2 = 1.0 * h_box / h
        factor = min(f1, f2)
        width = int(w * factor)
        height = int(h * factor)
        # ANTIALIAS is the best down-sizing filter in classic PIL.
        out = pil_image.resize((width, height), PIL.Image.ANTIALIAS)
        out.save(outfile)
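
    # Worked example of the scaling math above (numbers are illustrative,
    # not from the original post): fitting a 1440x1080 image into the
    # 720x540 box gives f1 = 0.5 and f2 = 0.5, so the output is 720x540;
    # a 1000x400 image gives f1 = 0.72 and f2 = 1.35, so factor = 0.72 and
    # the output is 720x288. Taking min(f1, f2) guarantees neither output
    # dimension exceeds the box.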

    def write_text_content(self, text_data, path):
        filename = path + "/" + "text_data.txt"
        fp = open(filename, "w")
        fp.write(text_data)
        fp.close()

    def create_ppt(self, title_content, ppt_file, picfile_list):
        # default.pptx supplies the slide master: layout 0 is used for the
        # title slide, layout 6 is the blank layout used for the pictures.
        prs = Presentation("default.pptx")
        title_slide_layout = prs.slide_layouts[0]
        slide = prs.slides.add_slide(title_slide_layout)
        slide.shapes.title.text = title_content
        graph_slide_layout = prs.slide_layouts[6]
        for picfile in picfile_list:
            slide = prs.slides.add_slide(graph_slide_layout)
            slide.shapes.add_picture(picfile, 0, 0)
        prs.save(ppt_file)
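
    # A minimal sketch (not part of the original crawler): the Inches
    # helper imported above can place a picture with explicit geometry
    # instead of dropping it at (0, 0). The 10in x 7.5in slide size is an
    # assumption about default.pptx.
    def add_sized_picture(self, slide, picfile):
        # Height fills the slide; python-pptx scales the width to keep the
        # picture's aspect ratio when only one dimension is given.
        slide.shapes.add_picture(picfile, Inches(1.25), Inches(0),
                                 height=Inches(7.5))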

def test_ppt():
    # Probe every layout in default.pptx to see which ones can be saved.
    picfile = "test.jpeg"
    for i in xrange(12):
        try:
            prs = Presentation("default.pptx")
            graph_slide_layout = prs.slide_layouts[i]
            slide = prs.slides.add_slide(graph_slide_layout)
            placeholder = slide.placeholders[0]
            #pic = placeholder.insert_picture(picfile)
            prs.save("../ppt/%s.pptx" % i)
        except Exception:
            continue

def test_layout(i):
    picfile = "test.jpeg"
    prs = Presentation("default.pptx")
    graph_slide_layout = prs.slide_layouts[i]
    slide = prs.slides.add_slide(graph_slide_layout)
    placeholder = slide.placeholders[1]
    pic = placeholder.insert_picture(picfile)
    prs.save("../ppt/%s.pptx" % i)

def test_empty_layout():
    picfile = "test.jpeg"
    prs = Presentation("default.pptx")
    graph_slide_layout = prs.slide_layouts[6]
    slide = prs.slides.add_slide(graph_slide_layout)
    slide.shapes.add_picture(picfile, 0, 0)
    prs.save("../ppt/%s.pptx" % 6)


if __name__ == "__main__":
    #test_empty_layout()
    #test_layout(int(sys.argv[1]))
    #test_ppt()

    crawler = Crawler()
    #crawler.get_item_list()
    item_url = "http://mp.weixin.qq.com/s?__biz=MzA3NzIwMDM3OA==&amp;mid=206906414&amp;idx=1&amp;sn=484555cf9c8efd164d06f6f6d0a6c19e&amp;scene=21#wechat_redirect"
    item_url = item_url.replace("&amp;", "&")
    crawler.get_item(item_url)
    print "done!"

util.py

#!/usr/bin/env python
# encoding: utf-8
import urllib2


def request_url(url, repeat=3):
    # Fetch a URL, retrying up to `repeat` times; returns (ret, content)
    # where ret is 0 on success and -1 on failure.
    ret = -1
    content = ''
    for cnt in xrange(repeat):
        try:
            req = urllib2.Request(url)
            response = urllib2.urlopen(req)
            content = response.read()
            response.close()
            ret = 0
            break
        except Exception:
            continue
    return (ret, content)
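
# A minimal usage sketch (not in the original post; example.com is a
# placeholder URL): fetch a page once and report whether it succeeded.
if __name__ == "__main__":
    ret, content = request_url("http://example.com")
    print "ret=%d, got %d bytes" % (ret, len(content))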
