def _next_request_from_scheduler(self, spider):  # the engine pulls the next request from the scheduler
    slot = self.slot
    request = slot.scheduler.next_request()
    if not request:
        return
    d = self._download(request, spider)  # register a download; returns a Deferred
    d.addBoth(self._handle_downloader_output, request, spider)
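To make the pull-and-register pattern above concrete outside of Scrapy's engine, here is a minimal, self-contained sketch using plain Twisted Deferreds. ToyScheduler, fake_download, and handle_downloader_output are illustrative names, not Scrapy API; the real engine also wires errbacks and slot bookkeeping onto the same Deferred.

from twisted.internet import defer

class ToyScheduler:
    def __init__(self, requests):
        self._queue = list(requests)

    def next_request(self):
        # return None when the queue is drained, as Scrapy's scheduler does
        return self._queue.pop(0) if self._queue else None

def fake_download(request):
    # stand-in for engine._download(): returns a Deferred that fires with the "response"
    return defer.succeed("response for %s" % request)

def handle_downloader_output(response, request):
    # addBoth-style callback: runs whether the download succeeded or failed
    print(request, "->", response)
    return response

scheduler = ToyScheduler(["http://example.com/a", "http://example.com/b"])
while True:
    request = scheduler.next_request()
    if not request:
        break
    d = fake_download(request)
    d.addBoth(handle_downloader_output, request)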
How Scrapy deduplication works

Scrapy ships with a built-in dedup middleware: in the Scrapy source you can find dupefilters.py, the duplicate-request filter.

The dedup algorithm from the source:

# the returned fingerprints are collected in a set, which is what implements the dedup
def request_fingerprint(request, include_headers=None):
    if include_headers:
        include_headers = tuple(to_bytes(h.lower()) for h in sorted(include_headers))
    cache = _fingerprint_cache.setdefault(request, {})
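The function hashes the request method, the canonicalized URL, and the body with SHA1, and the caller stores the resulting digest in a set: a request whose fingerprint is already in the set is dropped as a duplicate. Below is a simplified, self-contained sketch of that idea; simple_fingerprint is a made-up name, not Scrapy API, and real Scrapy additionally canonicalizes the URL (via w3lib.url.canonicalize_url) and can mix selected headers into the hash.

import hashlib

def simple_fingerprint(method, url, body=b""):
    # hash the parts of the request that identify "the same resource"
    fp = hashlib.sha1()
    fp.update(method.encode())
    fp.update(url.encode())  # Scrapy hashes the *canonicalized* URL here
    fp.update(body)
    return fp.hexdigest()

seen = set()
for method, url in [("GET", "http://example.com/a"),
                    ("GET", "http://example.com/b"),
                    ("GET", "http://example.com/a")]:  # third entry is a duplicate
    fp = simple_fingerprint(method, url)
    if fp in seen:
        print("duplicate, skipped:", url)
    else:
        seen.add(fp)
        print("new request:", url)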