This is main

#coding:utf8
from homework_test import url_manager, html_downloader, html_parser,\
    html_outputer

class SpiderMain(object):

    #constructor: initialize the collaborating objects
    def __init__(self):
        #URL manager
        self.urls = url_manager.UrlManager()
        #page downloader
        self.downloader = html_downloader.HtmlDownloader()
        #page parser
        self.parser = html_parser.HtmlParser()
        #result outputer
        self.outputer = html_outputer.HtmlOutputer()

    #scheduler loop of the crawler
    def craw(self, root_url):
        #count tracks which URL is currently being crawled
        count = 1
        #1. seed the manager with the entry URL
        self.urls.add_new_url(root_url)
        #keep crawling while the manager still has unvisited URLs
        while self.urls.has_new_url():
            try:
                #fetch one URL that is waiting to be crawled
                new_url = self.urls.get_new_url()
                print 'craw %d : %s' % (count, new_url)
                #download the page
                html_cont = self.downloader.download(new_url)
                #parse the page: returns newly found URLs and the extracted data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                #feed the new URLs back into the manager
                self.urls.add_new_urls(new_urls)
                #collect the extracted data
                self.outputer.collect_data(new_data)
                #stop after 100 pages
                if count == 100:
                    break
                count = count + 1
            except:
                print 'craw failed'
        #write out everything collected
        self.outputer.output_html()

#1. the entry point
if __name__ == "__main__":
    #2. the seed URL
    root_url = "http://www.bmw.com.cn/zh_CN/index.html"
    obj_spider = SpiderMain()
    #3. start the crawler
    obj_spider.craw(root_url)
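The main module imports three helper modules (url_manager, html_downloader, html_outputer) from the homework_test package that are not shared in this post. A minimal sketch of what they could look like so the scheduler above can run; the class and method names follow what main calls, but everything else (set-based URL bookkeeping, a urllib2 downloader, a plain output.html table writer) is my assumption, not the original files:

#coding:utf8
#Hypothetical minimal url_manager.py / html_downloader.py / html_outputer.py.
#Only the interfaces used by SpiderMain are implemented.
import urllib2

class UrlManager(object):
    #keeps two sets: URLs waiting to be crawled and URLs already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

class HtmlDownloader(object):
    #downloads a page and returns its raw bytes, or None on a non-200 response
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()

class HtmlOutputer(object):
    #accumulates parsed records and dumps them into a simple output.html table
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write('<html><body><table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table></body></html>')
        fout.close()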
*********************************************************************
This is the parser
#coding:utf8
from bs4 import BeautifulSoup
import re
import urlparse

class HtmlParser(object):

    def _get_new_urls(self, page_url, soup):
        #collect the results in a set
        new_urls = set()
        #original Baike pattern: /view/1234.htm
        #target links look like: href="//www.bmw.com.cn/zh_CN/topics/premium_selection.html"
        #links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        links = soup.find_all('a', href=re.compile(r"//www\.bmw\.com\.cn/zh_CN/topics/\w+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        #url
        res_data['url'] = page_url
        #selectors tried while adapting the Baike example to this site:
        #<dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        #<li class="mainNavProductItemsSeries">
        #<ul class="mainNavItems">
        #<ul class="ul_level_2">
        #title_node = soup.find('ul', class_="ul_level_2").find("li")
        #res_data['title'] = title_node.get_text()
        #<div class="lemma-summary" label-module="lemmaSummary">
        #<div class="mainNavTopicItemsLevel2">
        #<div class="bottomNavBlock">
        #<div class="overviewContainer">
        summary_node = soup.find('div', class_="bottomNavBlock")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='gb2312')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
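To sanity-check the parser on its own, it can be fed a small inline snippet. The test_html markup below is made up for illustration and only mimics the anchor href and the bottomNavBlock div the parser looks for; it is not the real bmw.com.cn page:

#coding:utf8
#Standalone check of HtmlParser against a hand-written snippet (assumption, not site data)
test_html = '''
<html><body>
  <a href="//www.bmw.com.cn/zh_CN/topics/premium_selection.html">Premium Selection</a>
  <div class="bottomNavBlock">Footer navigation text</div>
</body></html>
'''

parser = HtmlParser()
new_urls, new_data = parser.parse("http://www.bmw.com.cn/zh_CN/index.html", test_html)
print new_urls              #set containing the absolute topics URL
print new_data['summary']   #text extracted from the bottomNavBlock div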