Analysis of a Scrapy spider that crawls WooYun's public vulnerability reports
# -*- coding: utf-8 -*-
from datetime import datetime

import pymongo
import scrapy

from wooyun.items import WooyunItem
from scrapy.conf import settings


class WooyunSpider(scrapy.Spider):
    name = "wooyun"  # spider name; run with: scrapy crawl wooyun
    allowed_domains = ["wooyun.org"]
    # The Spider base class iterates over this class attribute
    # (for url in self.start_urls: yield Request(url, dont_filter=True)),
    # so these URLs are the starting points of the crawl.
    start_urls = [
        'http://wooyun.org/bugs/new_public/'
    ]

    def __init__(self, page_max=settings['PAGE_MAX_DEFAULT'],
                 local_store=settings['LOCAL_STORE_DEFAULT'],
                 update=settings['UPDATE_DEFAULT'], *args, **kwargs):
        super(WooyunSpider, self).__init__(*args, **kwargs)
        self.page_max = int(page_max)
        self.local_store = 'true' == local_store.lower()
        self.update = 'true' == update.lower()
        self.connection_string = "mongodb://%s:%d" % (settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
        self.client = pymongo.MongoClient(self.connection_string)
        self.db = self.client[settings['MONGODB_DB']]
        self.collection = self.db[settings['MONGODB_COLLECTION']]

    def closed(self, reason):
        self.client.close()

    def parse(self, response):
        # Called with the response to the first request (the start URL).
        total_pages = response.xpath("//p[@class='page']/text()").re('\d+')[1]
        if self.page_max == 0:
            end_page = int(total_pages)
        else:
            end_page = self.page_max
        for n in range(1, end_page + 1):
            page = "/bugs/new_public/page/%d" % n  # one page of WooYun's public vulnerability list
            url = response.urljoin(page)
            yield scrapy.Request(url, self.parse_list)  # parse that list page

    def parse_list(self, response):
        # Collect the detail-page links from one list page.
        links = response.xpath('//tbody/tr/td/a/@href').extract()
        for url in links:
            wooyun_id = url.split('/')[2]
            if self.update or not self.__search_mongodb(wooyun_id):
                url = response.urljoin(url)
                yield scrapy.Request(url, self.parse_detail)

    def parse_detail(self, response):
        # Extract the fields of a single vulnerability report.
        item = WooyunItem()
        item['wooyun_id'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[1]/a/@href').extract()[0].split('/')[2]
        item['title'] = response.xpath('//title/text()').extract()[0].split("|")[0]
        item['bug_type'] = response.xpath("//h3[@class='wybug_type']/text()").extract()[0].split(u':')[1].strip()
        #item['bug_type'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[7]/text()').extract()[0].split(u':')[1].strip()
        # Some reports have no plain-text author, for example:
        # http://wooyun.org/bugs/wooyun-2010-01010
        # which would make the author parse fail, hence the try/except.
        try:
            #item['author'] = response.xpath("//h3[@class='wybug_author']/a/text()").extract()[0]
            item['author'] = response.xpath('//*[@id="bugDetail"]/div[5]/h3[4]/a/text()').extract()[0]
        except:
            item['author'] = '<Parse Error>'
        # response.body is a str, so decode it to utf-8;
        # otherwise saving to MongoDB may cause trouble.
        item['html'] = response.body.decode('utf-8', 'ignore')
        #dt = response.xpath("//h3[@class='wybug_date']/text()").re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        dt = response.xpath('//*[@id="bugDetail"]/div[5]/h3[5]/text()').re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        item['datetime'] = datetime(int(dt[0]), int(dt[1]), int(dt[2]))
        #dt = response.xpath("//h3[@class='wybug_open_date']/text()").re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        dt = response.xpath('//*[@id="bugDetail"]/div[5]/h3[6]/text()').re("[\d+]{4}-[\d+]{2}-[\d+]{2}")[0].split('-')
        item['datetime_open'] = datetime(int(dt[0]), int(dt[1]), int(dt[2]))
        # image URLs to download
        item['image_urls'] = []
        if self.local_store:
            # WooYun image URLs come in two forms: absolute ones such as
            # http://static.wooyun.org/wooyun/upload/... and relative ones such as /upload/...
            # For the latter, prepend http://www.wooyun.org here to form a full URL;
            # pipelines.py does the corresponding reverse transformation when storing.
            image_urls = response.xpath("//img[contains(@src, '/upload/')]/@src").extract()
            for u in image_urls:
                if self.__check_ingnored_image(u):
                    continue
                if u.startswith('/'):
                    u = 'http://www.wooyun.org' + u
                item['image_urls'].append(u)
        return item  # produce one item

    def __check_ingnored_image(self, image_url):
        for ignored_url in settings['IMAGE_DOWLOAD_IGNORED']:
            if ignored_url in image_url:
                return True
        return False

    def __search_mongodb(self, wooyun_id):
        wooyun_id_exsist = self.collection.find({'wooyun_id': wooyun_id}).count() > 0
        return wooyun_id_exsist
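The spider fills a WooyunItem defined in wooyun/items.py, which is not shown in this post. The sketch below is only an assumption reconstructed from the fields assigned in parse_detail; the real project may declare additional fields (for example an images field used by an images pipeline).

# items.py sketch -- an assumption reconstructed from parse_detail above,
# not necessarily identical to the real wooyun/items.py.
import scrapy

class WooyunItem(scrapy.Item):
    wooyun_id = scrapy.Field()
    title = scrapy.Field()
    bug_type = scrapy.Field()
    author = scrapy.Field()
    html = scrapy.Field()
    datetime = scrapy.Field()
    datetime_open = scrapy.Field()
    image_urls = scrapy.Field()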
scrapy.Request(url, self.parse_detail) creates a Request object with a callback function. Request objects are put into the scheduler's queue, and the engine then pulls requests from the scheduler to have them downloaded.
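As a minimal illustration of that callback chain (a toy spider, not part of the WooYun project): the engine hands each downloaded Response to the request's callback, and request.meta can carry state from one callback to the next.

import scrapy

class CallbackDemoSpider(scrapy.Spider):
    # Toy spider used only to illustrate Request callbacks and meta.
    name = "callback_demo"
    start_urls = ['http://example.com/']

    def parse(self, response):
        # Queue a follow-up request; the scheduler holds it until the
        # engine is ready to download it.
        yield scrapy.Request(response.urljoin('/detail'),
                             callback=self.parse_detail,
                             meta={'listed_at': response.url})

    def parse_detail(self, response):
        # Once downloaded, the engine calls this callback with the Response.
        yield {'url': response.url, 'listed_at': response.meta['listed_at']}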
The function invoked by Scrapy's crawl command:
def run(self, args, opts):
    if len(args) < 1:
        raise UsageError()
    elif len(args) > 1:
        raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
    spname = args[0]
    self.crawler_process.crawl(spname, **opts.spargs)  # crawler_process is a CrawlerProcess instance, attached in cmdline.execute() below
    self.crawler_process.start()
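For example, running scrapy crawl wooyun -a page_max=5 -a update=true reaches run() with args == ['wooyun'] and opts.spargs == {'page_max': '5', 'update': 'true'}; crawl() forwards those keyword arguments to WooyunSpider.__init__, which is how the page_max, local_store and update options of the spider get their values.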
C:\Python27\Lib\site-packages\scrapy\cmdline.py
def execute(argv=None, settings=None):
    ......
    cmd.settings = settings
    cmd.add_options(parser)
    opts, args = parser.parse_args(args=argv[1:])
    _run_print_help(parser, cmd.process_options, args, opts)
    cmd.crawler_process = CrawlerProcess(settings)  # attach the crawler process to the command object
    _run_print_help(parser, _run_command, cmd, args, opts)
    sys.exit(cmd.exitcode)
CrawlerProcess(settings) [C:\Python27\Lib\site-packages\scrapy\crawler.py]
class CrawlerProcess(CrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.

    This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
    for starting a Twisted `reactor`_ and handling shutdown signals, like the
    keyboard interrupt command Ctrl-C. It also configures top-level logging.

    This utility should be a better fit than
    :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
    Twisted `reactor`_ within your application.

    The CrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    This class shouldn't be needed (since Scrapy is responsible of using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """

    def __init__(self, settings=None):
        super(CrawlerProcess, self).__init__(settings)
        install_shutdown_handlers(self._signal_shutdown)
        configure_logging(self.settings)
        log_scrapy_info(self.settings)

    def _signal_shutdown(self, signum, _):
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        logger.info("Received %(signame)s, shutting down gracefully. Send again to force ",
                    {'signame': signame})
        reactor.callFromThread(self._graceful_stop_reactor)

    def _signal_kill(self, signum, _):
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        logger.info('Received %(signame)s twice, forcing unclean shutdown',
                    {'signame': signame})
        reactor.callFromThread(self._stop_reactor)

    def start(self, stop_after_crawl=True):
        """
        This method starts a Twisted `reactor`_, adjusts its pool size to
        :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
        on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

        If `stop_after_crawl` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.

        :param boolean stop_after_crawl: stop or not the reactor when all
            crawlers have finished
        """
        if stop_after_crawl:
            d = self.join()
            # Don't start the reactor if the deferreds are already fired
            if d.called:
                return
            d.addBoth(self._stop_reactor)

        reactor.installResolver(self._get_dns_resolver())
        tp = reactor.getThreadPool()
        tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
        reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
        reactor.run(installSignalHandlers=False)  # blocking call

    def _get_dns_resolver(self):
        if self.settings.getbool('DNSCACHE_ENABLED'):
            cache_size = self.settings.getint('DNSCACHE_SIZE')
        else:
            cache_size = 0
        return CachingThreadedResolver(
            reactor=reactor,
            cache_size=cache_size,
            timeout=self.settings.getfloat('DNS_TIMEOUT')
        )

    def _graceful_stop_reactor(self):
        d = self.stop()
        d.addBoth(self._stop_reactor)
        return d

    def _stop_reactor(self, _=None):
        try:
            reactor.stop()
        except RuntimeError:  # raised if already stopped or in shutdown stage
            pass
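As the docstring's run-from-script note suggests, the same CrawlerProcess can be driven directly from a Python script instead of through the crawl command. A minimal sketch, assuming it is executed inside the Scrapy project so that get_project_settings() can locate the wooyun settings module:

# Run the wooyun spider from a script; the spider arguments mirror
# what `scrapy crawl wooyun -a ...` would pass to __init__.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('wooyun', page_max='5', local_store='true', update='false')
process.start()  # starts the Twisted reactor; blocks until the crawl finishes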