1 #coding:utf-8
  2 
  3 """
  4 
  5 Created on 2015-9-17
  6 
  7  
  8 
  9 @author: huangxie
 10 
 11 """
 12 
 13 import time,math,os,re,urllib,urllib2,cookielib 
 14 
 15 from bs4 import BeautifulSoup
 16 
 17 import time  
 18 
 19 import re
 20 
 21 import uuid
 22 
 23 import json
 24 
 25 from threading import Thread
 26 
 27 from Queue import Queue 
 28 
 29 import MySQLdb as mdb
 30 
 31 import sys
 32 
 33 import threading
 34 
 35 import utils
 36 
 37 import imitate_browser
 38 
 39 from MySQLdb.constants.REFRESH import STATUS
 40 
reload(sys)
sys.setdefaultencoding('utf-8') # Python 2 only: make utf-8 the implicit str<->unicode codec

# MySQL connection settings (database name 'sosogif' is supplied at connect time)
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASS = 'root'

# Initial HTTP proxy; replaced at runtime from the `proxy` DB table (see BaiduImage.get_pic)
proxy = {u'http':u'222.39.64.13:8118'}

# Baidu image-search JSON endpoint: word=query, pn=result offset, rn=results per page
TOP_URL="http://image.baidu.com/i?tn=resultjsonavatarnew&ie=utf-8&word={word}&pn={pn}&rn={rn}"
# Baidu web-search page; its "related searches" box is scraped for new keywords
KEYWORD_URL="https://www.baidu.com/s?ie=utf-8&f=8&tn=baidu&wd={wd}"

"""
i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
              'Accept':'json;q=0.9,*/*;q=0.8',
              'Accept-Charset':'utf-8;q=0.7,*;q=0.3',
              'Accept-Encoding':'gzip',
              'Connection':'close',
              'Referer':None #注意如果依然不能抓取的话,这里可以设置抓取网站的host
            }
"""

# HTTP headers sent with every request (User-Agent spoofing only)
i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
 80 
 81  
 82 
def GetDateString():
    """Return today's date as "YYYY-M-D" (no zero padding), used as a folder name.

    Deliberately NOT strftime("%Y-%m-%d"): month/day must stay unpadded
    (e.g. "2015-9-17") to match the folder names created by earlier runs.
    """
    x = time.localtime(time.time())
    # plain attribute access instead of the original x.__getattribute__("tm_year")
    return "%d-%d-%d" % (x.tm_year, x.tm_mon, x.tm_mday)
 90 
 91  
 92 
class BaiduImage(threading.Thread):     
    """Multi-threaded crawler for GIFs from Baidu image search.

    Worker threads (spawned by start_work) repeatedly pull (id, word, page_num)
    jobs from request_queue, fetch the JSON result pages through a rotating
    HTTP proxy, save images under E://sosogif and record their metadata in the
    `sosogif` MySQL database (tables: info, pic_info, proxy).
    """

    def __init__(self):
        Thread.__init__(self)
        self.browser=imitate_browser.BrowserBase()  # helper for plain page fetches
        self.chance=0   # page-fetch error counter; any error triggers a proxy switch
        self.chance1=0  # image-save IOError counter; >1 triggers a proxy switch
        self.request_queue=Queue()   # pending (id, word, page_num) crawl jobs
        self.wait_ana_queue=Queue()  # unused leftover of an older async-analyse design
        #self.key_word_queue.put((("动态图", 0, 24)))
        self.count=0  # images saved so far; spreads files across subfolders
        self.mutex = threading.RLock() # reentrant lock: a thread may re-acquire a lock it already holds
        self.commit_count=0  # pending pic_info INSERTs; committed in batches of 10
        self.ID=500  # current row id in the `proxy` table used for rotation
        self.next_proxy_set = set()  # candidate (protocol, "ip:port") proxies
        self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'sosogif', charset='utf8')
        self.dbconn.autocommit(False)  # commits are issued manually, in batches
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute('SET NAMES utf8')

    """
    def run(self):
        while True:
            self.get_pic()
    """

    def work(self,item):
        """Worker-thread loop: process one job, then enqueue follow-up jobs."""
        print "start thread",item
        while True: # original note: wait once more than MAX_REQUEST jobs are queued
            self.get_pic()
            self.prepare_request()

    def format_keyword_url(self,keyword):
        """Build the Baidu web-search URL used to discover related keywords."""
        return KEYWORD_URL.format(wd=keyword).encode('utf-8') 

    def generateSeed(self,url):
        """Scrape the "related searches" box of a Baidu result page and insert
        every keyword containing "动态图" or "gif" as a new seed row in `info`."""
        html = self.browser.openurl(url).read()
        if html:
            try:
                soup = BeautifulSoup(html)
                trs = soup.find('div', id='rs').find('table').find_all('tr') # all rows of the related-search table
                for tr in trs:
                    ths=tr.find_all('th')
                    for th in ths:
                        a=th.find_all('a')[0]
                        keyword=a.text.strip()
                        if "动态图" in keyword or "gif" in keyword:
                            print "keyword",keyword
                            # only insert keywords not already present
                            self.dbcurr.execute('select id from info where word=%s',(keyword))
                            y = self.dbcurr.fetchone()
                            if not y:
                                self.dbcurr.execute('INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,0,0,0,0)',(keyword))
                    self.dbconn.commit()
            except:
                # NOTE(review): bare except silently swallows parse/DB errors
                pass

    def prepare_request(self):
        """Take one unvisited row from `info`, queue it for crawling; for a
        brand-new keyword also fan out one `info` row per result page."""
        self.lock()
        self.dbcurr.execute('select * from info where status=0')
        result = self.dbcurr.fetchone()
        if result:
            id,word,status,page_num,left_num,how_many=result  # NOTE: `id` shadows the builtin
            self.request_queue.put((id,word,page_num)) 
            if page_num==0 and left_num==0 and how_many==0:  # fresh keyword, never expanded
                url=self.format_keyword_url(word)
                self.generateSeed(url)  # also harvest related keywords as new seeds
                html=""
                try:
                    url=self.format_top_url(word, page_num, 24)
                    html = self.browser.openurl(url).read()
                except Exception as err:
                    print "err",err
                    #pass
                if html!="":
                    how_many=self.how_many(html)
                    print "how_many",how_many
                    if how_many==None:
                        how_many=0
                    t=math.ceil(how_many/24*100) # only the first 1/100 of pages is wanted
                    num = int(t)
                    for i  in xrange(0,num-1):
                        self.dbcurr.execute('INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,%s,%s,%s,%s)',(word,0,i*24,num-i,how_many))
                    self.dbcurr.execute('update info SET status=1 WHERE id=%s',(id)) # mark as visited
                    self.dbconn.commit()
        self.unlock()

    def start_work(self,req_max):
        """Spawn req_max daemon worker threads running self.work."""
        for item in xrange(req_max):
            t = threading.Thread(target=self.work, args=(item,))
            t.setDaemon(True)
            t.start()

    def lock(self): # acquire the shared reentrant lock
        self.mutex.acquire()

    def unlock(self): # release the shared reentrant lock
        self.mutex.release()

    def get_para(self,url,key):
        """Return the value of query parameter `key` in `url`, or None."""
        values = url.split('?')[-1]
        for key_value in values.split('&'):
            value=key_value.split('=')
            if value[0]==key:
                return value[1]
        return None  

    def makeDateFolder( self,par,child):
        """Create (if needed) par/<today>/<child> and return that path;
        if `par` is not an existing directory, just return `par`."""
        #self.lock()
        if os.path.isdir( par ):
            path=par + '//' + GetDateString()
            newFolderName = path+'//'+child
            if not os.path.isdir(path):
                os.mkdir(path)
            if not os.path.isdir( newFolderName ):
                os.mkdir( newFolderName )
            return newFolderName
        else:
            return par 
        #self.unlock()

    def parse_json(self,data):
        """Parse one JSON result page and download every image not yet stored."""
        ipdata = json.loads(data)
        try:
            if ipdata['imgs']:  
                for n in ipdata['imgs']: # each entry of the `imgs` array
                    if n['objURL']:  
                        try:
                            proxy_support = urllib2.ProxyHandler(proxy)
                            opener = urllib2.build_opener(proxy_support)
                            urllib2.install_opener(opener)
                            #print "proxy",proxy
                            self.lock()
                            # skip URLs already stored in pic_info
                            self.dbcurr.execute('select ID from pic_info where objURL=%s', (n['objURL']))
                            y = self.dbcurr.fetchone()
                            #print "y=",y
                            if y:
                                print "database exist"
                                self.unlock() # unlock before continue
                                continue
                            else:
                                real_extension=utils.get_extension(n['objURL'])
                                req = urllib2.Request(n['objURL'],headers=i_headers)
                                resp = urllib2.urlopen(req,None,5)
                                dataimg=resp.read()
                                name=str(uuid.uuid1())  # random unique file name
                                filename=""
                                if len(real_extension)>4:
                                    real_extension=".gif"  # implausible extension: assume gif
                                real_extension=real_extension.lower()
                                # gifs and other formats are spread over separate folder sets
                                if real_extension==".gif":
                                    filename  =self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
                                    self.count+=1
                                else:
                                    filename  =self.makeDateFolder("E://sosogif", "o"+str(self.count % 20))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
                                    self.count+=1
                                """
                                name=str(uuid.uuid1())
                                filename=""
                                if len(real_extension)>4:
                                    real_extension=".gif"
                                filename  =self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
                                self.count+=1 
                                """
                                try: 
                                    if not os.path.exists(filename): 
                                        file_object = open(filename,'w+b')  
                                        file_object.write(dataimg)  
                                        file_object.close()
                                        self.anaylis_info(n,filename,real_extension) # record metadata in the DB
                                    else:
                                        print "file exist" 
                                except IOError,e1:  
                                    print "e1=",e1
                                    pass
                            self.unlock()
                        except IOError,e2:  
                            #print "e2=",e2 
                            pass  
                            self.chance1+=1  # count save failures; feeds proxy rotation
        except Exception as parse_error:
            print "parse_error",parse_error
            pass

    def title_dealwith(self,title):
        """Strip the <strong>...</strong> highlight markup out of a result title."""
        #print "title",title
        a=title.find("<strong>")
        temp1=title[0:a]
        b=title.find("</strong>")
        temp2=title[a+8:b]  # 8 == len("<strong>")
        temp3=title[b+9:len(title)]  # 9 == len("</strong>")
        return (temp1+temp2+temp3).strip()

    def anaylis_info(self,n,filename,real_extension):
        """Insert one image's metadata into pic_info (committed in batches of 10)."""
        print "success."
        #if self.wait_ana_queue.qsize()!=0:
            #n,filename,real_extension=self.wait.ana_queue.get()
        #self.lock()
        objURL=n['objURL'] # image URL
        fromURLHost=n['fromURLHost'] # host of the source site
        width=n['width']  # image width
        height=n['height'] # image height
        di=n['di'] # unique identifier supplied by Baidu
        type=n['type'] # image format (NOTE: shadows the builtin `type`)
        fromPageTitle=n['fromPageTitle'] # title of the page the image came from
        keyword=self.title_dealwith(fromPageTitle)
        cs=n['cs'] # meaning unknown; used as the dedup key below
        os=n['os'] # meaning unknown (NOTE: shadows the `os` module in this scope)
        temp = time.time()
        x = time.localtime(float(temp))
        acTime = time.strftime("%Y-%m-%d %H:%M:%S",x) # crawl timestamp
        self.dbcurr.execute('select ID from pic_info where cs=%s', (cs))
        y = self.dbcurr.fetchone()
        if not y:
            print 'add pic',filename
            self.commit_count+=1
            self.dbcurr.execute('INSERT INTO pic_info(objURL,fromURLHost,width,height,di,type,keyword,cs,os,acTime,filename,real_extension) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',(objURL,fromURLHost,width,height,di,type,keyword,cs,os,acTime,filename,real_extension))
            if self.commit_count==10:
                # batch commits to cut DB round-trips
                self.dbconn.commit()
                self.commit_count=0
        #self.unlock()

    def format_top_url(self,word,pn,rn):
        """Build the Baidu image-search JSON URL for `word`, offset pn, page size rn."""
        url = TOP_URL.format(word=word, pn=pn,rn=rn).encode('utf-8') 
        return url

    def how_many(self,data):
        """Return the total result count reported by a JSON page: int if
        displayNum > 0, 0 otherwise, None (implicitly) on parse failure."""
        try:
            ipdata = json.loads(data)
            if ipdata['displayNum']>0:
                how_many=ipdata['displayNum']
                return int(how_many)
            else:
                return 0
        except Exception as e:
            pass

    def get_pic(self):
        """Take one job off request_queue, mark its `info` row visited, rotate
        the HTTP proxy when the error counters indicate trouble, fetch the
        result page and hand it to parse_json."""
        try:
            global proxy
            print "size of queue",self.request_queue.qsize()
            if self.request_queue.qsize()!=0:
                id,word,page_num = self.request_queue.get()            
                u=self.format_top_url(word,page_num,24)
                self.lock()
                self.dbcurr.execute('update info SET status=1 WHERE id=%s',(id))
                self.dbconn.commit()
                if self.chance >0 or self.chance1>1: # either failure kind triggers a proxy change
                    if self.ID % 100==0:
                        # every 100 rotations, wrap around once past the table size
                        self.dbcurr.execute("select count(*) from proxy")
                        for r in self.dbcurr:
                            count=r[0]
                        if self.ID>count:
                            self.ID=50
                    self.dbcurr.execute("select * from proxy where ID=%s",(self.ID))
                    results = self.dbcurr.fetchall()
                    for r in results:
                        protocol=r[1]
                        ip=r[2]
                        port=r[3]
                        pro=(protocol,ip+":"+port)
                        if pro not in self.next_proxy_set:
                            self.next_proxy_set.add(pro)
                    self.chance=0
                    self.chance1=0
                    self.ID+=1
                self.unlock() 
                proxy_support = urllib2.ProxyHandler(proxy)
                opener = urllib2.build_opener(proxy_support)
                urllib2.install_opener(opener)
                html=""
                try:
                    req = urllib2.Request(u,headers=i_headers)
                    #print "u=",u
                    response = urllib2.urlopen(req, None,5)
                    html = response.read()
                    if html:
                        #print "html",type(html)
                        self.parse_json(html)
                except Exception as ex1:
                    #print "error=",ex1
                    pass
                    self.chance+=1  # fetch failed: schedule a proxy switch
                    if self.chance>0 or self.chance1>1:
                        if len(self.next_proxy_set)>0:
                            protocol,socket=self.next_proxy_set.pop()
                            proxy= {protocol:socket}
                            print "change proxy finished<<",proxy,self.ID
        except Exception as e:
            print "error1",e
            pass
 
             
 
if __name__ == '__main__':

    app = BaiduImage() 
    app.start_work(80)
    #app.generateSeed()
    # Keep the main thread alive so the daemon worker threads keep running.
    # The original `while 1: pass` busy-waited and pinned a CPU core;
    # sleeping is equivalent for liveness and costs nothing.
    while True:
        time.sleep(1)

百度图片爬虫-python版的更多相关文章

  1. 百度图片爬虫-python版-如何爬取百度图片?

    上一篇我写了如何爬取百度网盘的爬虫,在这里还是重温一下,把链接附上: http://www.cnblogs.com/huangxie/p/5473273.html 这一篇我想写写如何爬取百度图片的爬虫 ...

  2. python写的百度图片爬虫

    学了一下python正则表达式,写一个百度图片爬虫玩玩. 当技术遇上心术不正的人,就成我这样的2B青年了. python3.6开发.程序已经打包好,下载地址: http://pan.baidu.com ...

  3. python 百度图片爬虫

    # -*- coding:utf-8 -*- #https://blog.csdn.net/qq_32166627/article/details/60882964 import requests i ...

  4. 百度翻译爬虫-Web版(自动生成sign)

    # 面向对象 # 百度翻译 -- 网页版(自动获取token,sign) import requests import js2py import json import re class WebFan ...

  5. Python 爬虫实例(1)—— 爬取百度图片

    爬取百度图片  在Python 2.7上运行 #!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: loveNight import jso ...

  6. 【Python网络爬虫四】通过关键字爬取多张百度图片的图片

    最近看了女神的新剧<逃避虽然可耻但有用>,同样男主也是一名程序员,所以很有共鸣 被大只萝莉萌的一脸一脸的,我们来爬一爬女神的皂片. 百度搜索结果:新恒结衣 本文主要分为4个部分: 1.下载 ...

  7. Python爬虫:通过关键字爬取百度图片

    使用工具:Python2.7 点我下载 scrapy框架 sublime text3 一.搭建python(Windows版本) 1.安装python2.7 ---然后在cmd当中输入python,界 ...

  8. 如何用Python爬虫实现百度图片自动下载?

    Github:https://github.com/nnngu/LearningNotes 制作爬虫的步骤 制作一个爬虫一般分以下几个步骤: 分析需求 分析网页源代码,配合开发者工具 编写正则表达式或 ...

  9. python爬虫获取百度图片(没有精华,只为娱乐)

    python3.7,爬虫技术,获取百度图片资源,msg为查询内容,cnt为查询的页数,大家快点来爬起来.注:现在只能爬取到百度的小图片,以后有大图片的方法,我会陆续发贴. #!/usr/bin/env ...

随机推荐

  1. php并发请求

    一般在php进行请求url的时候,直接用 fopen 函数就可以搞定了,比如像这样: $file=fopen("http://www.cnblogs.com","r") ...

  2. jquery封装常用方法

    var git = { //true表示格式正确 checkEmail: function (str) { -]{,})(\S*)/g) == null) { return false; } else ...

  3. 23.C#Queryable的扩展方法(十二章12.1-12.2)

    今天要写的知识还真心有点绕呢,对于第一节的内容,其实是把原先在内存中的数据源,换成了从数据库中提取出来的数据.从代码的使用方式上是一样的,直接跳过,来看看IEnumerable和IQueryable的 ...

  4. 茗洋Easy UI 1.3.2 部分问题解决系列专题[Combo模糊匹配中文问题 修复]

    本次给大家带来的EasyUI的我研究拓展的新特性 我使用的是  EasyUI 1.3.2版本的,项目是ASP.NET MVC3,但是本篇讲解用不上ASP.NET MVC,仅仅修改官方Demo你就知道怎 ...

  5. Bootstrap3.0学习第二十六轮(JavaScript插件——图片轮播)

    详情请查看http://aehyok.com/Blog/Detail/32.html 个人网站地址:aehyok.com QQ 技术群号:206058845,验证码为:aehyok 本文文章链接:ht ...

  6. JavaScript事件---事件绑定和深入

    发文不易,转载传播,请亲注明链接出处,谢谢! 内容提纲: 1.传统事件绑定的问题 2.W3C事件处理函数 3.IE事件处理函数 4.事件对象的其他内容 事件绑定分为两种:一种是传统事件绑定(内联模型, ...

  7. WCF安装Windows服务

    安装图解: 安装命令: 1. 开始 ->运行 ->cmd2. cd到C:\WINDOWS\Microsoft.NET\Framework\v4.0.30319(Framework版本号按I ...

  8. Timer中schedule()的用法

    schedule的意思(时间表.进度表) timer.schedule(new TimerTask(){ void run()},0, 60*60*1000);timer.schedule(new M ...

  9. 5.9-4用字符串生成器给字符串str追加1~10这10个数字

    package zfc; public class ZfcShcq { public static void main(String[] args) { // TODO Auto-generated ...

  10. Spring 管理数据源

    Spring 管理数据源 不管通过何种持久化技术,都必须通过数据连接访问数据库,在Spring中,数据连接是通过数据源获得的.在以往的应用中,数据源一般是Web应用服务器提供的.在Spring中,你不 ...