As everyone knows, Google is essentially one giant crawler company, and for a crawler to fetch sites efficiently, it has to crawl them concurrently. That is exactly Go's home turf. Straight to the code:
package main

import (
    "fmt"
    "sync"
    "time"
)

// Two package-level globals: a mutex, and a map for deduplicating URLs.
var mu sync.Mutex
var v = make(map[string]int)
type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
    if depth <= 0 {
        return
    }
    mu.Lock() // take the lock before touching the dedup map
    if v[url] == 1 {
        mu.Unlock() // must unlock before this early return, or every later Crawl deadlocks here
        fmt.Printf("dedup: %s\n", url)
        return
    }
    v[url] = 1
    mu.Unlock() // done with the map; don't forget to unlock
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    for _, u := range urls {
        go Crawl(u, depth-1, fetcher) // one goroutine per link: concurrent crawling
    }
}

func main() {
    Crawl("https://golang.org/", 4, fetcher)
    time.Sleep(time.Second) // crude wait for the goroutines; see the sync.WaitGroup sketch after the listing
}
// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "https://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "https://golang.org/pkg/",
            "https://golang.org/cmd/",
        },
    },
    "https://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "https://golang.org/",
            "https://golang.org/cmd/",
            "https://golang.org/pkg/fmt/",
            "https://golang.org/pkg/os/",
        },
    },
    "https://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
    "https://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
}
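One caveat about the listing above: time.Sleep(time.Second) is only a guess at how long the crawl takes, so the program may exit before slow fetches finish, or idle after fast ones. A more robust wait uses sync.WaitGroup from the standard library. Below is a minimal sketch of that variant; CrawlWithWaitGroup and mainWithWaitGroup are hypothetical names, and the sketch reuses the mu, v, Fetcher, and fetcher declarations from above.

// Sketch: wait for the crawl with sync.WaitGroup instead of time.Sleep.
// Each goroutine is registered with wg.Add(1) before it starts and
// signals completion via defer wg.Done().
func CrawlWithWaitGroup(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
    defer wg.Done()
    if depth <= 0 {
        return
    }
    mu.Lock()
    if v[url] == 1 {
        mu.Unlock()
        fmt.Printf("dedup: %s\n", url)
        return
    }
    v[url] = 1
    mu.Unlock()
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    for _, u := range urls {
        wg.Add(1) // register the child in the parent, before the goroutine starts
        go CrawlWithWaitGroup(u, depth-1, fetcher, wg)
    }
}

func mainWithWaitGroup() {
    var wg sync.WaitGroup
    wg.Add(1)
    CrawlWithWaitGroup("https://golang.org/", 4, fetcher, &wg)
    wg.Wait() // returns exactly when the last goroutine calls Done
}

The one discipline that matters here is calling wg.Add(1) in the parent before spawning the child; if each child did its own Add, the counter could touch zero while goroutines were still being scheduled, and Wait would return too early.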
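The manual Lock/Unlock pairing is also exactly where the subtle bug lives: forget to unlock before an early return and every later goroutine blocks forever. On Go 1.9 or later, sync.Map folds the check-and-record into a single atomic LoadOrStore call, so there is no unlock to forget. A hypothetical crawlWithSyncMap sketch, again reusing Fetcher and fetcher from above:

// Sketch: dedup with sync.Map. LoadOrStore atomically stores the key
// and reports whether it was already present, replacing the mutex dance.
var seen sync.Map

func crawlWithSyncMap(url string, depth int, fetcher Fetcher) {
    if depth <= 0 {
        return
    }
    if _, loaded := seen.LoadOrStore(url, true); loaded {
        fmt.Printf("dedup: %s\n", url) // already visited
        return
    }
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    for _, u := range urls {
        go crawlWithSyncMap(u, depth-1, fetcher)
    }
}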
Alright, that's the end of this Go tutorial.
Young one, sword in hand, set your course for the sea of stars!!!