Python FilesPipeline: downloading an image gallery
# -*- coding: utf-8 -*-
import re
from time import sleep

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class AngelSpider(CrawlSpider):
    name = 'angel'
    allowed_domains = ['angelimg.spbeen.com']
    start_urls = ['http://angelimg.spbeen.com/']
    base_url = "http://angelimg.spbeen.com"

    rules = (
        Rule(LinkExtractor(allow=r'^http://angelimg.spbeen.com/ang/\d+$'),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        print(response.url)
        item = response.meta.get('item', False)
        if not item:
            # First page of a gallery: build the item and derive the folder name
            item = {}
            item['files'] = []
            item['file_urls'] = []
            dir_name = response.xpath('.//div[@class="article"]/h2/text()').extract_first()
            item['dir_name'] = dir_name.split('【')[0]
            # Strip everything except CJK characters, digits and ASCII letters
            item['dir_name'] = re.sub(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])", "", item['dir_name'])

        img_url = response.xpath('.//div[@id="content"]/a/img/@src').extract_first()
        item['file_urls'].append(img_url)

        # If there is a next page, request it and carry the item along;
        # otherwise hand the finished item over to the pipeline.
        next_url = response.xpath('.//div[@class="page"]//a[contains(@class,"next")]/@href').extract_first()
        # sleep(1)
        if next_url:
            next_url = self.base_url + next_url
            yield scrapy.Request(next_url, callback=self.parse_item, meta={'item': item})
        else:
            yield item
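For reference, this is roughly the shape of the item that reaches the pipeline once the last page of a gallery has been parsed (the field values here are illustrative, not taken from the site):

item = {
    'dir_name': 'SomeGalleryName',    # cleaned gallery title, used as the folder name
    'file_urls': [                    # one image URL collected per page
        'http://angelimg.spbeen.com/.../001.jpg',
        'http://angelimg.spbeen.com/.../002.jpg',
    ],
    'files': [],                      # populated by the FilesPipeline after download
}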
Pipeline: subclassing the FilesPipeline
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import hashlib
import os

from scrapy.http import Request
from scrapy.pipelines.files import FilesPipeline
from scrapy.utils.python import to_bytes


class AngelimgPipeline(object):
    def process_item(self, item, spider):
        return item


class DealFilePathPipeline(FilesPipeline):
    def get_media_requests(self, item, info):
        # Attach the item to each media request so file_path() can read dir_name
        return [Request(x, meta={'item': item}) for x in item.get(self.files_urls_field, [])]

    def file_path(self, request, response=None, info=None):
        ## start of deprecation warning block (can be removed in the future)
        def _warn():
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('FilesPipeline.file_key(url) method is deprecated, please use '
                          'file_path(request, response=None, info=None) instead',
                          category=ScrapyDeprecationWarning, stacklevel=1)

        # check if called from file_key with url as first argument
        if not isinstance(request, Request):
            _warn()
            url = request
        else:
            url = request.url

        # detect if file_key() method has been overridden
        if not hasattr(self.file_key, '_base'):
            _warn()
            return self.file_key(url)
        ## end of deprecation warning block

        item = request.meta.get('item', {})
        media_guid = hashlib.sha1(to_bytes(url)).hexdigest()  # change to request.url after deprecation
        media_ext = os.path.splitext(url)[1]  # change to request.url after deprecation
        # print(item)  # debug
        # Group files by gallery instead of the parent class's flat 'full/' directory
        return 'full2/{}/{}{}'.format(item['dir_name'], media_guid, media_ext)
        # Default path from the parent class, kept for reference (unreachable):
        # return 'full/%s%s' % (media_guid, media_ext)  # deprecated

    def file_key(self, url):
        return self.file_path(url)
    file_key._base = True
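A quick sanity check of what file_path() produces, as a standalone sketch — the URL and the gallery name 'SomeGallery' are hypothetical stand-ins for a real image URL and dir_name:

import hashlib
import os
from scrapy.utils.python import to_bytes

url = 'http://angelimg.spbeen.com/ang/1234/1.jpg'  # hypothetical image URL
media_guid = hashlib.sha1(to_bytes(url)).hexdigest()
media_ext = os.path.splitext(url)[1]
print('full2/{}/{}{}'.format('SomeGallery', media_guid, media_ext))
# -> full2/SomeGallery/<40-hex-char sha1>.jpg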
settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for angelImg project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'angelImg'

SPIDER_MODULES = ['angelImg.spiders']
NEWSPIDER_MODULE = 'angelImg.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'angelImg (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # 'Accept-Language': 'en',
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
    "Referer": "http://angelimg.spbeen.com/",
}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'angelImg.middlewares.AngelimgSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'angelImg.middlewares.AngelimgDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    #'angelImg.pipelines.AngelimgPipeline': 300,
    'angelImg.pipelines.DealFilePathPipeline': 200,
    #'scrapy.pipelines.files.FilesPipeline': 2,
}

# Root directory where downloaded files are stored
FILES_STORE = 'file_doload'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
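With these settings in place, the crawl is started from the project root with the usual Scrapy command, using the spider name defined above:

scrapy crawl angel

The FilesPipeline joins FILES_STORE with the relative path returned by file_path(), so each image should end up under something like file_doload/full2/<dir_name>/<sha1><ext>, one folder per gallery.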