#!/usr/bin/python3
#-*- coding:utf-8 -*-
"""
create 2018-02-27
author zl
desc:
https://indianexpress.com#印度快报
files:
indianexpress.py #爬虫
config.ini #配置文件
max_main_interval #脚本循环间隔
network_timeout #超时
proxy #代理
dependent:
configparser,bs4
usage:
skip """
config.ini
[indianexpress]
max_main_interval=
network_timeout=
proxy=
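The values above are left blank in the original; a plausible filled-in example looks like this (the numbers are illustrative assumptions, not the author's settings — both are in seconds):

[indianexpress]
; illustrative values only; leave proxy empty to connect directly
max_main_interval=600
network_timeout=30
proxy=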
indianexpress.py
#!/usr/bin/python3
#-*- coding:utf-8 -*-
"""
create 2018-02-27
author zl
desc:
    https://indianexpress.com  # Indian Express
files:
    indianexpress.py  # the crawler
    config.ini  # config file
        max_main_interval  # interval between main-loop runs
        network_timeout  # request timeout
        proxy  # proxy server
dependent:
    configparser, bs4
usage:
    skip
"""
from pb_code import *
from bs4 import BeautifulSoup
import re
import time


def get_list(urls):
    for url in urls['url']:
        try:
            resp = fetch_get(False, url, network_timeout, proxy)
            if resp is not None:
                soup = BeautifulSoup(resp, 'html.parser')
                news_list = soup.find_all('div', class_=urls['list_class'])
                for art in news_list:
                    timedate = art.find('div', class_='date').text
                    #print(timedate)
                    timepattern = re.compile(r'\w+ \d{1,2}, \d{4} \d{1,2}:\d{1,2}')
                    t1 = timepattern.findall(timedate)
                    # the numeric constants below were stripped from the post;
                    # int(3600*2.5) (IST-to-local offset) and 3600*12 (pm shift) are reconstructions
                    timestamp = int(time.mktime(time.strptime(t1[0], "%B %d, %Y %H:%M"))) + int(3600 * 2.5)
                    if 'pm' in timedate:
                        timestamp = timestamp + 3600 * 12
                    if int(time.time()) - timestamp < 3600:  # assumed freshness window; original value lost
                        title = art.find('div', class_='title')
                        #print(title)
                        url = title.find('a')
                        art_json = {
                            "publishtime": time.strftime("%Y-%m-%d %X", time.localtime(timestamp)),
                            "author": "",
                            "title": url.text,
                            "content": "",
                            "gathertime": time.strftime("%Y-%m-%d %X", time.localtime()),
                            "inserttime": "",
                            "url": url.attrs['href'],
                            "group_id": "",
                            "site_id": "",
                            "site_url": "",
                            "language": "EN"
                        }
                        get_page(urls['page_content_id'], art_json)
                    elif timestamp is not None:
                        break
        except Exception as e:
            print('get_list {}, {}'.format(e, url))
        time.sleep(random.randint(1, 3))  # assumed pause range; original values lost


def get_page(tag_id, art_json):
    resp = fetch_get(False, art_json['url'], network_timeout, proxy)
    #print(resp)
    if resp is not None:
        soup = BeautifulSoup(resp, 'html.parser')
        art_content = soup.find('div', itemprop=tag_id).find_all('p')
        content = map(lambda x: x.text, art_content)
        #print(list(content))
        new_content = ''.join(list(content))
        art_json['content'] = new_content
        art_json['inserttime'] = time.strftime("%Y-%m-%d %X", time.localtime())
        print(art_json)
    time.sleep(random.randint(1, 3))  # assumed pause range; original values lost


if __name__ == '__main__':
    site_name = 'indianexpress'
    max_main_interval, network_timeout, proxy = load_config(site_name)
    while True:
        get_list(load_urls('indianexpress'))
        print("{} all done, sleep {} sec.".format(site_name, max_main_interval))
        time.sleep(max_main_interval)
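Because the numeric offsets in get_list() were stripped from the post, here is a standalone sketch of the reconstructed timestamp logic; the 2.5-hour offset (IST to the crawler's local time) and the 12-hour pm shift are assumptions, and the sample string is made up:

import re
import time

timedate = 'Updated: February 27, 2018 1:30 pm'  # made-up listing text
timepattern = re.compile(r'\w+ \d{1,2}, \d{4} \d{1,2}:\d{1,2}')
t1 = timepattern.findall(timedate)               # ['February 27, 2018 1:30']
# %H parses "1" as hour 01; the pm branch then shifts it to 13:30
timestamp = int(time.mktime(time.strptime(t1[0], '%B %d, %Y %H:%M'))) + int(3600 * 2.5)
if 'pm' in timedate:
    timestamp += 3600 * 12
print(time.strftime('%Y-%m-%d %X', time.localtime(timestamp)))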
pb_code.py
from configparser import ConfigParser
import json
import random
import requests


USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
def load_config(segment):
    '''Load the parameters for one site section from the config file'''
    config = ConfigParser()
    config.read('config.ini', encoding='utf-8-sig')
    p1 = int(config.get(segment, 'max_main_interval'))
    p2 = int(config.get(segment, 'network_timeout'))
    p3 = config.get(segment, 'proxy')
    if p3 == '':
        p3 = None
    return p1, p2, p3


def load_urls(fname):
    '''Load the crawl URLs and selectors from <fname>.json'''
    with open(fname + '.json', encoding='utf-8') as f:
        r = json.load(f)
    return r


def fetch_get(is_json, url, timeout, proxy):
    '''Send a GET request and return the parsed response'''
    head = {
        'User-Agent': random.choice(USER_AGENTS)
    }
    # proxies must be a dict, not a set
    req = requests.get(url, headers=head, timeout=timeout,
                       proxies={'http': proxy, 'https': proxy} if proxy else None)
    if is_json:
        x = json.loads(req.text)
    else:
        x = req.text
    return x


def save_db(jdata):
    '''Save the JSON record to Kafka via a relay HTTP endpoint'''
    print("site_name:{} url:{}".format(jdata['site_name'], jdata['url']))
    # the relay endpoint URL is left blank in the post
    requests.post('', json=jdata, headers={"Content-Type": "application/json", "keep_alive": 'False'})
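A quick way to exercise these helpers together, assuming config.ini and indianexpress.json sit in the working directory (the 200-character slice is arbitrary):

from pb_code import load_config, load_urls, fetch_get

max_main_interval, network_timeout, proxy = load_config('indianexpress')
urls = load_urls('indianexpress')
html = fetch_get(False, urls['url'][0], network_timeout, proxy)  # raw HTML of the listing page
print(html[:200])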
indianexpress.json
{
"list_class": "articles",
"page_content_id": "articleBody",
"url": [
"http://indianexpress.com/latest-news/"
]
}
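The two keys map straight onto the BeautifulSoup lookups in get_list() and get_page(); a minimal illustration against a made-up HTML fragment:

from bs4 import BeautifulSoup

html = '''<div class="articles">
  <div class="date">February 27, 2018 1:30 pm</div>
  <div class="title"><a href="http://indianexpress.com/article/sample/">Headline</a></div>
</div>'''
soup = BeautifulSoup(html, 'html.parser')
for art in soup.find_all('div', class_='articles'):      # "list_class" from the JSON
    print(art.find('div', class_='date').text)
    print(art.find('div', class_='title').find('a').attrs['href'])
# article pages are then matched with soup.find('div', itemprop='articleBody')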