按照课程写的爬虫程序但是输出没有任何结果?

主程序:

from baidubaike import url_manger, html_download, html_parser, html_outputer



class SpiderMain(object):

    def __init__(self):

        self.urls = url_manger.UrlManager()

        self.downloader = html_download.HtmlDownloader()

        self.parser = html_parser.HtmlParser()

        self.outputer = html_outputer.HtmlOutputer

   

    def craw(self,root_url):

        count = 1

        self.urls.add_new_url(root_url)

        while self.urls.has_new_url():

            try:

                new_url = self.urls.get_new_url()

                print('craw %d:%s'%(count,new_url))

                html_cont = self.downloader.download(new_url)

                new_urls,new_data = self.parser.parse(new_url,html_cont)

                self.urls.add_new_urls(new_urls)

                self.outputer.collect_data(new_data)

                

                if count == 1000:

                    break

                count +=1

            except:

                print('craw failed')

        self.outputer.output_html()

    

    




if __name__ =="main":

    root_url = "http://baike.baidu.com/view/21087.htm"

    obj_spider = SpiderMain()

    obj_spider.craw(root_url)

parser程序:

from bs4 import BeautifulSoup

import re

from urllib.parse import urljoin

class HtmlParser(object):

   


   def _get_new_urls(self, page_url, soup):

       res_data = {}

       

       res_data['url'] = page_url

       #<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>

       title_node = soup.find('dd',class_="lemmaWgt-lemmaTitle-title").find("h1")

       res_data['title'] = title_node.get_text()

       #<div class="lemma-summary" label-module="lemmaSummary">

       summary_node = soup.find('div',class_="lemma-summary")

       res_data['summary'] = summary_node.get_text()

       return res_data

   

   def _get_new_data(self, page_url, soup):

       new_urls = set()

       links = soup.find_all('a',re.compile(r'/view/\d+\.htm'))

       for link in links:

           new_url = link['href']

           new_full_url = urljoin(page_url,new_url)

           new_urls.add(new_full_url)

       return new_urls

   def parse(self,page_url,html_cont):

       if page_url is None or html_cont is None:

           return

          

       soup = BeautifulSoup(html_cont,'html.parser',from_encoding = 'utf-8')

       new_urls = self._get_new_urls(page_url,soup)

       new_data = self._get_new_data(page_url,soup)

       return new_urls,new_data

outputer程序:

class HtmlOutputer(object):

   def __init__(self):

       self.datas = []

       

   def collect_data(self,data):

       if data is None:

           return

       self.datas.append(data)

      

   def output_html(self):

       fout = open('output.html','w')

       

       fout.write('<html>')

       fout.write('<body>')

       fout.write('<table>')

       for data in self.datas:

           fout.write('<tr>')

           fout.write('<td>%s</td>'% data['url'].encode('utf-8'))

           fout.write('<td>%s</td>'% data['title'].encode('utf-8'))

           fout.write('<td>%s</td>'% data['summary'].encode('utf-8'))

       fout.write('</table>')

       fout.write('</body>')

       fout.write('</html>')


小宝君等期待
浏览 2589回答 3
3回答

wmmn

楼主问题解决了吗,我也出现了一样的问题

慕勒1493297

楼主问题解决了么~我现在也遇到了相同的问题~~

tanhouyusheng

是写到文件里的,你看看你的文件
打开App,查看更多内容
随时随地看视频慕课网APP

相关分类

Python