colly documentation notes

c.OnRequest(func(r *colly.Request) {
    fmt.Println("Visiting", r.URL)
})

c.OnError(func(_ *colly.Response, err error) {
    log.Println("Something went wrong:", err)
})

c.OnResponse(func(r *colly.Response) {
    fmt.Println("Visited", r.Request.URL)
})

c.OnHTML("a[href]", func(e *colly.HTMLElement) {
    e.Request.Visit(e.Attr("href"))
})

c.OnHTML("tr td:nth-of-type(1)", func(e *colly.HTMLElement) {
    fmt.Println("First column of a table row:", e.Text)
})

c.OnXML("http://h1", func(e *colly.XMLElement) {
    fmt.Println(e.Text)
})

c.OnScraped(func(r *colly.Response) {
    fmt.Println("Finished", r.Request.URL)
})
  1. OnRequest
     Called before a request

     c.OnRequest(func(r *colly.Request) {
         fmt.Println("Visiting", r.URL.String())
     })

  2. OnError
     Called if an error occurred during the request

     c.OnError(func(r *colly.Response, err error) {
         fmt.Println("Request URL:", r.Request.URL, "failed with response:", r, "\nError:", err)
     })

  3. OnResponse
     Called after a response is received

     c.OnResponse(func(r *colly.Response) {
         log.Println("response received", r.StatusCode)
     })

  4. OnHTML
     Called right after OnResponse if the received content is HTML

  5. OnXML
     Called right after OnHTML if the received content is HTML or XML

  6. OnScraped
     Called after the OnXML callbacks have run

A minimal runnable sketch of this firing order follows.
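To make that order concrete, here is a minimal self-contained sketch (the target URL is only an example) that registers every callback and prints as each one fires:

package main

import (
    "fmt"
    "log"

    "github.com/gocolly/colly"
)

func main() {
    c := colly.NewCollector()

    c.OnRequest(func(r *colly.Request) { fmt.Println("1. OnRequest", r.URL) })
    c.OnError(func(_ *colly.Response, err error) { log.Println("OnError:", err) })
    c.OnResponse(func(r *colly.Response) { fmt.Println("2. OnResponse", r.StatusCode) })
    c.OnHTML("title", func(e *colly.HTMLElement) { fmt.Println("3. OnHTML", e.Text) })
    c.OnXML("//title", func(e *colly.XMLElement) { fmt.Println("4. OnXML", e.Text) })
    c.OnScraped(func(r *colly.Response) { fmt.Println("5. OnScraped", r.Request.URL) })

    c.Visit("http://go-colly.org/")
}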

Collector options

Setting the User-Agent

colly.NewCollector(colly.UserAgent("xy"))

c2 := colly.NewCollector()
c2.UserAgent = "xy"

Setting the User-Agent dynamically (RandomString is a user-supplied helper; a sketch follows below)

c := colly.NewCollector()
c.OnRequest(func(r *colly.Request) {
    r.Headers.Set("User-Agent", RandomString())
})
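RandomString is not part of colly; a hypothetical implementation could look like this (the User-Agent pool below is illustrative):

import "math/rand"

// userAgents is a sample pool; replace the entries with real browser strings.
var userAgents = []string{
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
    "Mozilla/5.0 (X11; Linux x86_64)",
}

// RandomString picks one User-Agent from the pool at random.
func RandomString() string {
    return userAgents[rand.Intn(len(userAgents))]
}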

Allowing URL revisits (disable deduplication)

colly.AllowURLRevisit()

c2 := colly.NewCollector()
c2.AllowURLRevisit = true
Collector configuration options

ALLOWED_DOMAINS (comma separated list of domains) // allowed domains

c := colly.NewCollector(
        // Visit only domains: hackerspaces.org, wiki.hackerspaces.org
        colly.AllowedDomains("hackerspaces.org", "wiki.hackerspaces.org"),
    )

CACHE_DIR (string) // cache directory

colly.NewCollector(
    colly.CacheDir("./cache"))

DETECT_CHARSET (y/n)
DISABLE_COOKIES (y/n)
DISALLOWED_DOMAINS (comma separated list of domains)
IGNORE_ROBOTSTXT (y/n)
MAX_BODY_SIZE (int)
MAX_DEPTH (int - 0 means infinite)

c := colly.NewCollector(
        // MaxDepth is 1, so only the links on the scraped page
        // are visited, and no further links are followed
        colly.MaxDepth(1),
    )

PARSE_HTTP_ERROR_RESPONSE (y/n)
USER_AGENT (string)
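Most of these can also be set programmatically with functional options on NewCollector; a sketch covering the ones not demonstrated above (option names as found in colly v1; DISABLE_COOKIES maps to the c.DisableCookies() method instead):

c := colly.NewCollector(
    colly.DetectCharset(),                  // DETECT_CHARSET
    colly.DisallowedDomains("example.com"), // DISALLOWED_DOMAINS
    colly.IgnoreRobotsTxt(),                // IGNORE_ROBOTSTXT
    colly.MaxBodySize(10*1024*1024),        // MAX_BODY_SIZE, in bytes
    colly.ParseHTTPErrorResponse(),         // PARSE_HTTP_ERROR_RESPONSE
    colly.UserAgent("xy"),                  // USER_AGENT
)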


HTTP transport settings
c := colly.NewCollector()
c.WithTransport(&http.Transport{
    Proxy: http.ProxyFromEnvironment,
    DialContext: (&net.Dialer{
        Timeout:   30 * time.Second,
        KeepAlive: 30 * time.Second,
        DualStack: true,
    }).DialContext,
    MaxIdleConns:          100,
    IdleConnTimeout:       90 * time.Second,
    TLSHandshakeTimeout:   10 * time.Second,
    ExpectContinueTimeout: 1 * time.Second,
})
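Independently of the transport, the collector's overall request timeout can be set as well (a one-liner available on Collector):

c.SetRequestTimeout(30 * time.Second) // abandon requests that take longer than 30s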

Enabling debug output

import (
    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    c := colly.NewCollector(colly.Debugger(&debug.LogDebugger{}))
    // [..]
}

Setting proxies

package main

import (
    "github.com/gocolly/colly"
    "github.com/gocolly/colly/proxy"
)

func main() {
    c := colly.NewCollector()

    if p, err := proxy.RoundRobinProxySwitcher(
        "socks5://127.0.0.1:1337",
        "socks5://127.0.0.1:1338",
        "http://127.0.0.1:8080",
    ); err == nil {
        c.SetProxyFunc(p)
    }
    // ...
}

Custom proxy switcher

var proxies = []*url.URL{
    {Scheme: "http", Host: "127.0.0.1:8080"},
    {Scheme: "http", Host: "127.0.0.1:8081"},
}

func randomProxySwitcher(_ *http.Request) (*url.URL, error) {
    // Pick a proxy at random for each request (rand is math/rand).
    return proxies[rand.Intn(len(proxies))], nil
}

// ...
c.SetProxyFunc(randomProxySwitcher)

Complete proxy demo

package main

import (
    "bytes"
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/proxy"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(colly.AllowURLRevisit())

    // Rotate two socks5 proxies
    rp, err := proxy.RoundRobinProxySwitcher("socks5://127.0.0.1:1337", "socks5://127.0.0.1:1338")
    if err != nil {
        log.Fatal(err)
    }
    c.SetProxyFunc(rp)

    // Print the response
    c.OnResponse(func(r *colly.Response) {
        log.Printf("%s\n", bytes.Replace(r.Body, []byte("\n"), nil, -1))
    })

    // Fetch httpbin.org/ip five times
    for i := 0; i < 5; i++ {
        c.Visit("https://httpbin.org/ip")
    }
}

Copying a collector's configuration with Clone

c := colly.NewCollector(
    colly.UserAgent("myUserAgent"),
    colly.AllowedDomains("foo.com", "bar.com"),
)
// Custom User-Agent and allowed domains are cloned to c2

c2 := c.Clone()
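Note that Clone copies configuration only; callbacks are not carried over, so c2 registers its own:

c2.OnResponse(func(r *colly.Response) {
    log.Println("c2 received", r.Request.URL)
})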

Passing values between collectors with context

c.OnResponse(func(r *colly.Response) {
    r.Ctx.Put("Custom-Header", r.Headers.Get("Custom-Header"))
    c2.Request("GET", "https://foo.com/", nil, r.Ctx, nil)
})
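On the receiving side, c2 reads the value back from the shared context (the key matches the one stored above):

c2.OnResponse(func(r *colly.Response) {
    // Values put into the context by the first collector travel with the request.
    log.Println("Custom-Header was:", r.Ctx.Get("Custom-Header"))
})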

Note: setting a proxy and using a cache directory currently conflict with each other.

Disabling keep-alive

c := colly.NewCollector()
c.WithTransport(&http.Transport{
    DisableKeepAlives: true,
})

Asynchronous collection

c := colly.NewCollector(
    colly.Async(true),
)
// or set the field directly: c.Async = true
// remember to call c.Wait()

Complete async demo

package main

import (
    "fmt"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(
        // MaxDepth is 2, so only the links on the scraped page
        // and links on those pages are visited
        colly.MaxDepth(2),
        colly.Async(true),
    )

    // Limit the maximum parallelism to 2.
    // This is necessary because colly creates goroutines dynamically
    // in async mode, so the number of simultaneous requests must be
    // capped here.
    //
    // Parallelism can also be controlled by spawning a fixed
    // number of goroutines.
    c.Limit(&colly.LimitRule{DomainGlob: "*", Parallelism: 2})

    // On every a element which has href attribute call callback
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Attr("href")
        // Print link
        fmt.Println(link)
        // Visit link found on page on a new thread
        e.Request.Visit(link)
    })

    // Start scraping on https://en.wikipedia.org
    c.Visit("https://en.wikipedia.org/")
    // Wait until threads are finished
    c.Wait()
}

Built-in extensions

package main

import (
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/extensions"
)

func main() {
    c := colly.NewCollector()
    visited := false

    extensions.RandomUserAgent(c) // set a random User-Agent on every request
    extensions.Referer(c)         // fill in the Referer header automatically

    c.OnResponse(func(r *colly.Response) {
        log.Println(string(r.Body))
        if !visited {
            visited = true
            r.Request.Visit("/get?q=2")
        }
    })

    c.Visit("http://httpbin.org/get")
}

Currently supported extensions

func RandomUserAgent

func Referer

func URLLengthFilter
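URLLengthFilter is not demonstrated above; it takes the collector and a maximum URL length and drops requests with longer URLs:

extensions.URLLengthFilter(c, 100) // ignore URLs longer than 100 characters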

POSTing a multipart form

func generateFormData() map[string][]byte {
    f, _ := os.Open("gocolly.jpg")
    defer f.Close()

    imgData, _ := ioutil.ReadAll(f)

    return map[string][]byte{
        "firstname": []byte("one"),
        "lastname":  []byte("two"),
        "email":     []byte("onetwo@example.com"),
        "file":      imgData,
    }
}
c.PostMultipart("http://localhost:8080/", generateFormData())

POSTing JSON

c.OnRequest(func(r *colly.Request) {
    r.Headers.Set("Content-Type", "application/json;charset=UTF-8")
})
// amac_list_post_url and page are defined elsewhere in the surrounding program
c.PostRaw(fmt.Sprintf(amac_list_post_url, page), []byte("{}"))
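A more complete sketch, marshaling a struct into the body with encoding/json (the payload type and target URL are illustrative assumptions):

package main

import (
    "encoding/json"
    "log"

    "github.com/gocolly/colly"
)

type searchPayload struct {
    Page int    `json:"page"`
    Size int    `json:"size"`
    Term string `json:"term"`
}

func main() {
    c := colly.NewCollector()

    c.OnRequest(func(r *colly.Request) {
        r.Headers.Set("Content-Type", "application/json;charset=UTF-8")
    })
    c.OnResponse(func(r *colly.Response) {
        log.Println(string(r.Body))
    })

    body, err := json.Marshal(searchPayload{Page: 1, Size: 20, Term: "colly"})
    if err != nil {
        log.Fatal(err)
    }
    // httpbin echoes the posted body, which makes it convenient for testing.
    c.PostRaw("https://httpbin.org/post", body)
}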

Request queues (Queue)

package main

import (
    "fmt"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/queue"
)

func main() {
    url := "https://httpbin.org/delay/1"

    // Instantiate default collector
    c := colly.NewCollector()

    // create a request queue with 2 consumer threads
    q, _ := queue.New(
        2, // Number of consumer threads
        &queue.InMemoryQueueStorage{MaxSize: 10000}, // Use default queue storage
    )

    c.OnRequest(func(r *colly.Request) {
        fmt.Println("visiting", r.URL)
    })

    for i := 0; i < 5; i++ {
        // Add URLs to the queue
        q.AddURL(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Consume URLs
    q.Run(c)

}

Random delay

package main

import (
    "fmt"
    "time"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    url := "https://httpbin.org/delay/2"

    // Instantiate default collector
    c := colly.NewCollector(
        // Attach a debugger to the collector
        colly.Debugger(&debug.LogDebugger{}),
        colly.Async(true),
    )

    // Limit the number of threads started by colly to two
    // when visiting links whose domains match the "*httpbin.*" glob
    c.Limit(&colly.LimitRule{
        DomainGlob:  "*httpbin.*",
        Parallelism: 2,
        RandomDelay: 5 * time.Second,
    })

    // Start scraping in four threads on https://httpbin.org/delay/2
    for i := 0; i < 4; i++ {
        c.Visit(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Start scraping on https://httpbin.org/delay/2
    c.Visit(url)
    // Wait until threads are finished
    c.Wait()
}

Limiting parallelism

package main

import (
    "fmt"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/debug"
)

func main() {
    url := "https://httpbin.org/delay/2"

    // Instantiate default collector
    c := colly.NewCollector(
        // Turn on asynchronous requests
        colly.Async(true),
        // Attach a debugger to the collector
        colly.Debugger(&debug.LogDebugger{}),
    )

    // Limit the number of threads started by colly to two
    // when visiting links whose domains match the "*httpbin.*" glob
    c.Limit(&colly.LimitRule{
        DomainGlob:  "*httpbin.*",
        Parallelism: 2,
        //Delay:      5 * time.Second,
    })

    // Start scraping in five threads on https://httpbin.org/delay/2
    for i := 0; i < 5; i++ {
        c.Visit(fmt.Sprintf("%s?n=%d", url, i))
    }
    // Wait until threads are finished
    c.Wait()
}

Using Redis as the queue storage

package main

import (
    "log"

    "github.com/gocolly/colly"
    "github.com/gocolly/colly/queue"
    "github.com/gocolly/redisstorage"
)

func main() {
    urls := []string{
        "http://httpbin.org/",
        "http://httpbin.org/ip",
        "http://httpbin.org/cookies/set?a=b&c=d",
        "http://httpbin.org/cookies",
    }

    c := colly.NewCollector()

    // create the redis storage
    storage := &redisstorage.Storage{
        Address:  "127.0.0.1:6379",
        Password: "",
        DB:       0,
        Prefix:   "httpbin_test",
    }

    // add storage to the collector
    err := c.SetStorage(storage)
    if err != nil {
        panic(err)
    }

    // delete previous data from storage
    if err := storage.Clear(); err != nil {
        log.Fatal(err)
    }

    // close redis client
    defer storage.Client.Close()

    // create a new request queue with redis storage backend
    q, _ := queue.New(2, storage)

    c.OnResponse(func(r *colly.Response) {
        log.Println("Cookies:", c.Cookies(r.Request.URL.String()))
    })

    // add URLs to the queue
    for _, u := range urls {
        q.AddURL(u)
    }
    // consume requests
    q.Run(c)
}

Context management

package main

import (
    "fmt"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector()

    // Before making a request put the URL with
    // the key of "url" into the context of the request
    c.OnRequest(func(r *colly.Request) {
        r.Ctx.Put("url", r.URL.String())
    })

    // After making a request get "url" from
    // the context of the request
    c.OnResponse(func(r *colly.Response) {
        fmt.Println(r.Ctx.Get("url"))
    })

    // Start scraping on https://en.wikipedia.org
    c.Visit("https://en.wikipedia.org/")
}

Running the scraper as a server

package main

import (
    "encoding/json"
    "log"
    "net/http"

    "github.com/gocolly/colly"
)

type pageInfo struct {
    StatusCode int
    Links      map[string]int
}

func handler(w http.ResponseWriter, r *http.Request) {
    URL := r.URL.Query().Get("url")
    if URL == "" {
        log.Println("missing URL argument")
        return
    }
    log.Println("visiting", URL)

    c := colly.NewCollector()

    p := &pageInfo{Links: make(map[string]int)}

    // count links
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Request.AbsoluteURL(e.Attr("href"))
        if link != "" {
            p.Links[link]++
        }
    })

    // extract status code
    c.OnResponse(func(r *colly.Response) {
        log.Println("response received", r.StatusCode)
        p.StatusCode = r.StatusCode
    })
    c.OnError(func(r *colly.Response, err error) {
        log.Println("error:", r.StatusCode, err)
        p.StatusCode = r.StatusCode
    })

    c.Visit(URL)

    // dump results
    b, err := json.Marshal(p)
    if err != nil {
        log.Println("failed to serialize response:", err)
        return
    }
    w.Header().Add("Content-Type", "application/json")
    w.Write(b)
}

func main() {
    // example usage: curl -s 'http://127.0.0.1:7171/?url=http://go-colly.org/'
    addr := ":7171"

    http.HandleFunc("/", handler)

    log.Println("listening on", addr)
    log.Fatal(http.ListenAndServe(addr, nil))
}

Filtering URLs with regular expressions

package main

import (
    "fmt"
    "regexp"

    "github.com/gocolly/colly"
)

func main() {
    // Instantiate default collector
    c := colly.NewCollector(
        // Visit only root url and urls which start with "e" or "h" on httpbin.org
        colly.URLFilters(
            regexp.MustCompile("http://httpbin\\.org/(|e.+)$"),
            regexp.MustCompile("http://httpbin\\.org/h.+"),
        ),
    )

    // On every a element which has href attribute call callback
    c.OnHTML("a[href]", func(e *colly.HTMLElement) {
        link := e.Attr("href")
        // Print link
        fmt.Printf("Link found: %q -> %s\n", e.Text, link)
        // Visit link found on page
        // Only those links are visited which are matched by any of the URLFilter regexps
        c.Visit(e.Request.AbsoluteURL(link))
    })

    // Before making a request print "Visiting ..."
    c.OnRequest(func(r *colly.Request) {
        fmt.Println("Visiting", r.URL.String())
    })

    // Start scraping on http://httpbin.org
    c.Visit("http://httpbin.org/")
}