I'm trying to solve an exercise about fetching URLs in parallel while using a cache to avoid duplicates. I found the correct solution and can understand it. I see that the correct answer uses channels, and the goroutines push URLs into the cache through a chan. But why doesn't my simpler code below work properly? I can't figure out where it goes wrong.
package main

import (
    "fmt"
    "sync"
)

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

var cache = struct {
    cache map[string]int
    mux   sync.Mutex
}{cache: make(map[string]int)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
    // TODO: Fetch URLs in parallel.
    // TODO: Don't fetch the same URL twice.
    // This implementation doesn't do either:
    if depth <= 0 {
        return
    }
    cache.mux.Lock()
    cache.cache[url] = 1 // put url in the cache
    cache.mux.Unlock()
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    for _, u := range urls {
        cache.mux.Lock()
        if _, ok := cache.cache[u]; !ok { // check if url is already in the cache
            cache.mux.Unlock()
            go Crawl(u, depth-1, fetcher)
        } else {
            cache.mux.Unlock()
        }
    }
    return
}

func main() {
    Crawl("http://golang.org/", 4, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "http://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "http://golang.org/pkg/",
            "http://golang.org/cmd/",
        },
    },
    "http://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "http://golang.org/",
            "http://golang.org/cmd/",
            "http://golang.org/pkg/fmt/",
            "http://golang.org/pkg/os/",
        },
    },
    "http://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "http://golang.org/",
            "http://golang.org/pkg/",
        },
    },
    "http://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "http://golang.org/",
            "http://golang.org/pkg/",
        },
    },
}
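
For comparison, here is a minimal sketch of one possible fix, assuming a sync.WaitGroup is an acceptable substitute for the channel the official solution uses. Two things likely go wrong in the code above: main returns as soon as the outer Crawl does, so the program exits before most of the spawned goroutines ever run, and there is a window between checking the cache for u and the child goroutine inserting it, during which a second goroutine can pass the same check and fetch u twice. The sketch replaces only Crawl and main, reusing the Fetcher interface and fetcher variable defined above; the seen and wg names are my own, not part of the original exercise.

var seen = struct {
    m   map[string]bool
    mux sync.Mutex
}{m: make(map[string]bool)}

var wg sync.WaitGroup

// Crawl marks each URL as seen *before* spawning the goroutine that
// fetches it, so no other goroutine can pass the check for the same URL.
func Crawl(url string, depth int, fetcher Fetcher) {
    defer wg.Done()
    if depth <= 0 {
        return
    }
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    for _, u := range urls {
        seen.mux.Lock()
        if !seen.m[u] {
            seen.m[u] = true // claim u before the goroutine starts
            wg.Add(1)
            go Crawl(u, depth-1, fetcher)
        }
        seen.mux.Unlock()
    }
}

func main() {
    seen.m["http://golang.org/"] = true // safe: no other goroutines exist yet
    wg.Add(1)
    go Crawl("http://golang.org/", 4, fetcher)
    wg.Wait() // block until every spawned Crawl has called wg.Done()
}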