Go Concurrent Crawler
Code implementation
/crawler/main.go
package main

import (
    "learn/crawler/engine"
    "learn/crawler/scheduler"
    "learn/crawler/zhenai/parser"
)

func main() {
    e := engine.ConcurrentEngine{
        Scheduler:   &scheduler.QueuedScheduler{},
        WorkerCount: 20,
    }
    e.Run(engine.Request{
        Url:       "http://www.zhenai.com/zhenghun",
        ParseFunc: parser.ParseCityList,
    })
    // Test a single city (Shanghai):
    //e.Run(engine.Request{
    //    Url:       "http://www.zhenai.com/zhenghun/shanghai",
    //    ParseFunc: parser.ParseCity,
    //})
}
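The import paths above assume the project lives in a module (or GOPATH workspace) named learn. With Go modules, a minimal go.mod at the repository root would look like the sketch below; the module name matches the imports, but the Go version is an assumption. Running go mod tidy then pulls in the golang.org/x/net and golang.org/x/text packages that the fetcher depends on.

module learn

go 1.13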
/crawler/engine/simple.go
package engine

import (
    "learn/crawler/fetcher"
    "log"
)

type SimpleEngine struct {
}

func (e SimpleEngine) Run(seeds ...Request) {
    var requests []Request
    requests = append(requests, seeds...)
    for len(requests) > 0 {
        r := requests[0]
        requests = requests[1:]
        parseResult, err := worker(r)
        if err != nil {
            continue
        }
        requests = append(requests, parseResult.Requests...)
        for _, item := range parseResult.Items {
            log.Printf("Got item %v", item)
        }
    }
}

func worker(r Request) (ParseResult, error) {
    log.Printf("Fetching %s", r.Url)
    body, err := fetcher.Fetch(r.Url)
    if err != nil {
        log.Printf("Fetcher: error fetching url %s: %v", r.Url, err)
        return ParseResult{}, err
    }
    return r.ParseFunc(body), nil
}
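simple.go is the single-task engine from the previous version, kept for comparison; the concurrent engine reuses its worker function. It can still be swapped in from main.go (a sketch under the same imports):

e := engine.SimpleEngine{}
e.Run(engine.Request{
    Url:       "http://www.zhenai.com/zhenghun",
    ParseFunc: parser.ParseCityList,
})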
/crawler/engine/concurrent.go
package engine

import (
    "log"
)

type ConcurrentEngine struct {
    Scheduler   Scheduler
    WorkerCount int
}

type Scheduler interface {
    ReadyNotifier
    Submit(Request)
    WorkerChan() chan Request
    Run()
}

type ReadyNotifier interface {
    WorkerReady(chan Request)
}

func (e *ConcurrentEngine) Run(seeds ...Request) {
    out := make(chan ParseResult)
    e.Scheduler.Run()
    for i := 0; i < e.WorkerCount; i++ {
        createWorker(e.Scheduler.WorkerChan(), out, e.Scheduler)
    }
    for _, r := range seeds {
        e.Scheduler.Submit(r)
    }
    itemCount := 0
    for {
        result := <-out
        for _, item := range result.Items {
            log.Printf("Got item #%d: %v", itemCount, item)
            itemCount++
        }
        for _, request := range result.Requests {
            e.Scheduler.Submit(request)
        }
    }
}

func createWorker(in chan Request, out chan ParseResult, ready ReadyNotifier) {
    go func() {
        for {
            // Announce readiness, then block until the scheduler
            // hands this worker a request on its channel.
            ready.WorkerReady(in)
            request := <-in
            result, err := worker(request)
            if err != nil {
                continue
            }
            out <- result
        }
    }()
}
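Note that Run's collecting loop never returns; the crawler stops only when the process is killed. If you want it to exit after a fixed number of items, one possible modification (a sketch, not part of the original code; the limit of 1000 is arbitrary) is to bound the loop:

// Inside ConcurrentEngine.Run, replacing the infinite for loop:
for itemCount := 0; itemCount < 1000; {
    result := <-out
    for _, item := range result.Items {
        log.Printf("Got item #%d: %v", itemCount, item)
        itemCount++
    }
    for _, request := range result.Requests {
        e.Scheduler.Submit(request)
    }
}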
/crawler/engine/typers.go
package engine

type Request struct {
    Url       string
    ParseFunc func([]byte) ParseResult
}

type ParseResult struct {
    Requests []Request
    Items    []interface{}
}

func NilParser([]byte) ParseResult {
    return ParseResult{}
}
/crawler/fetcher/fetcher.go
package fetcher

import (
    "bufio"
    "fmt"
    "golang.org/x/net/html/charset"
    "golang.org/x/text/encoding"
    "golang.org/x/text/encoding/unicode"
    "golang.org/x/text/transform"
    "io/ioutil"
    "log"
    "net/http"
    "time"
)

// Limit fetching to at most one request every 100ms.
var rateLimiter = time.Tick(100 * time.Millisecond)

func Fetch(url string) ([]byte, error) {
    <-rateLimiter
    client := &http.Client{}
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }
    req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36")
    resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("wrong status code: %d", resp.StatusCode)
    }
    bodyReader := bufio.NewReader(resp.Body)
    e := determineEncoding(bodyReader)
    // Decode the page into UTF-8 using the detected encoding.
    utf8Reader := transform.NewReader(bodyReader, e.NewDecoder())
    return ioutil.ReadAll(utf8Reader)
}

func determineEncoding(r *bufio.Reader) encoding.Encoding {
    // Peek (without consuming) the first 1KB to guess the charset.
    bytes, err := r.Peek(1024)
    if err != nil {
        log.Printf("Fetcher error: %v", err)
        return unicode.UTF8
    }
    e, _, _ := charset.DetermineEncoding(bytes, "")
    return e
}
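A quick way to exercise the fetcher on its own is a throwaway main (a sketch; it assumes the learn module path from above and network access):

package main

import (
    "fmt"
    "learn/crawler/fetcher"
)

func main() {
    body, err := fetcher.Fetch("http://www.zhenai.com/zhenghun")
    if err != nil {
        panic(err)
    }
    fmt.Printf("fetched %d bytes\n", len(body))
}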
/crawler/zhenai/parser/citylist.go
package parser

import (
    "learn/crawler/engine"
    "regexp"
)

const cityListRe = `<a href="(http://www.zhenai.com/zhenghun/[0-9a-z]+)" [^>]*>([^<]+)</a>`

func ParseCityList(contents []byte) engine.ParseResult {
    re := regexp.MustCompile(cityListRe)
    matches := re.FindAllSubmatch(contents, -1)
    result := engine.ParseResult{}
    for _, m := range matches {
        result.Items = append(result.Items, "City: "+string(m[2]))
        result.Requests = append(result.Requests, engine.Request{
            Url:       string(m[1]),
            ParseFunc: ParseCity,
        })
    }
    return result
}
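Because the parsers are pure functions from bytes to a ParseResult, they are easy to unit-test against a canned fragment. A minimal sketch, e.g. in citylist_test.go (the HTML snippet is fabricated for illustration, not copied from the live page):

package parser

import "testing"

func TestParseCityList(t *testing.T) {
    contents := []byte(`<a href="http://www.zhenai.com/zhenghun/shanghai" class="city">上海</a>`)
    result := ParseCityList(contents)
    if len(result.Requests) != 1 {
        t.Errorf("expected 1 request; got %d", len(result.Requests))
    }
    if len(result.Items) != 1 {
        t.Errorf("expected 1 item; got %d", len(result.Items))
    }
}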
/crawler/zhenai/parser/city.go
package parser

import (
    "learn/crawler/engine"
    "regexp"
)

var (
    profileRe = regexp.MustCompile(`<a href="(http://album.zhenai.com/u/[0-9]+)" [^>]*>([^<]+)</a>`)
    cityUrlRe = regexp.MustCompile(`href="(http://www.zhenai.com/zhenghun/[^"]+)"`)
)

func ParseCity(contents []byte) engine.ParseResult {
    matches := profileRe.FindAllSubmatch(contents, -1)
    result := engine.ParseResult{}
    for _, m := range matches {
        // name is declared inside the loop, so each closure below
        // captures its own copy rather than a shared loop variable.
        name := string(m[2])
        result.Items = append(result.Items, "User "+name)
        result.Requests = append(result.Requests, engine.Request{
            Url: string(m[1]),
            ParseFunc: func(c []byte) engine.ParseResult {
                return ParseProfile(c, "name:"+name)
            },
        })
    }
    matches = cityUrlRe.FindAllSubmatch(contents, -1)
    for _, m := range matches {
        result.Requests = append(result.Requests, engine.Request{
            Url:       string(m[1]),
            ParseFunc: ParseCity,
        })
    }
    return result
}
/crawler/zhenai/parser/profile.go
package parser

import (
    "learn/crawler/engine"
    "learn/crawler/model"
    "regexp"
)

const all = `<div class="m-btn purple" data-v-8b1eac0c>([^<]+)</div>`

func ParseProfile(contents []byte, name string) engine.ParseResult {
    profile := model.Profile{}
    profile.User = append(profile.User, name)
    re := regexp.MustCompile(all)
    // Ranging over a nil slice is a no-op, so no nil check is needed.
    match := re.FindAllSubmatch(contents, -1)
    for _, m := range match {
        profile.User = append(profile.User, string(m[1]))
    }
    result := engine.ParseResult{
        Items: []interface{}{profile},
    }
    return result
}
/crawler/model/profile.go
package model

type Profile struct {
    User []string
}
/crawler/scheduler/queued.go
package scheduler

import "learn/crawler/engine"

type QueuedScheduler struct {
    requestChan chan engine.Request
    workChan    chan chan engine.Request
}

// Each worker gets its own channel.
func (s *QueuedScheduler) WorkerChan() chan engine.Request {
    return make(chan engine.Request)
}

func (s *QueuedScheduler) Submit(r engine.Request) {
    s.requestChan <- r
}

func (s *QueuedScheduler) WorkerReady(w chan engine.Request) {
    s.workChan <- w
}

func (s *QueuedScheduler) Run() {
    s.workChan = make(chan chan engine.Request)
    s.requestChan = make(chan engine.Request)
    go func() {
        var requestQ []engine.Request
        var workerQ []chan engine.Request
        for {
            var activeRequest engine.Request
            var activeWorker chan engine.Request
            if len(requestQ) > 0 && len(workerQ) > 0 {
                activeRequest = requestQ[0]
                activeWorker = workerQ[0]
            }
            select {
            case r := <-s.requestChan:
                requestQ = append(requestQ, r)
            case w := <-s.workChan:
                workerQ = append(workerQ, w)
            // activeWorker stays nil unless both queues are non-empty,
            // so this case can only fire when there is work to dispatch.
            case activeWorker <- activeRequest:
                workerQ = workerQ[1:]
                requestQ = requestQ[1:]
            }
        }
    }()
}
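The dispatch loop leans on a Go idiom: sending on a nil channel blocks forever, so the activeWorker <- activeRequest case is effectively disabled until both a request and a worker are queued. A self-contained illustration of the idiom:

package main

import "fmt"

func main() {
    var disabled chan int        // nil: a send on it blocks forever
    enabled := make(chan int, 1) // buffered: a send succeeds immediately

    select {
    case disabled <- 1:
        fmt.Println("unreachable: a send on a nil channel never proceeds")
    case enabled <- 2:
        fmt.Println("selected: only the non-nil case can fire")
    }
}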
/crawler/scheduler/simple.go
package scheduler

import "learn/crawler/engine"

type SimpleScheduler struct {
    workerChan chan engine.Request
}

// All workers share a single channel.
func (s *SimpleScheduler) WorkerChan() chan engine.Request {
    return s.workerChan
}

func (s *SimpleScheduler) WorkerReady(chan engine.Request) {
}

func (s *SimpleScheduler) Run() {
    s.workerChan = make(chan engine.Request)
}

// Submit spawns a goroutine per request so the scheduler
// never blocks when all workers are busy.
func (s *SimpleScheduler) Submit(r engine.Request) {
    go func() { s.workerChan <- r }()
}
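Both schedulers satisfy the engine.Scheduler interface, so switching between them is a one-line change in main.go (sketch):

e := engine.ConcurrentEngine{
    Scheduler:   &scheduler.SimpleScheduler{},
    WorkerCount: 20,
}

The trade-off: SimpleScheduler hands every request to a fresh goroutine that blocks on one shared channel, while QueuedScheduler keeps explicit request and worker queues and gives each worker its own channel, which makes the dispatch order easier to reason about.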
Full project:
https://gitee.com/FenYiYuan/golang-cpdcrawler.git