from scrapy import Spider, Request
from selenium import webdriver


class MySpider(Spider):
    name = "my_spider"

    def __init__(self):
        super().__init__()
        # Selenium-driven Chrome instance shared by the whole spider
        self.browser = webdriver.Chrome(executable_path='E:/chromedriver')
        self.browser.set_page_load_timeout(100)

    def closed(self, spider):
        print("spider closed")
        self.browser.close()

    def start_requests(self):
        start_urls = []
        with open("target_urls.txt", 'r', encoding='utf-8') as f:
            for line in f:
                # strip() drops the trailing newline that would
                # otherwise stay attached to the URL
                url_id, url = line.strip().split('\t\t')
                start_urls.append(url)
        for url in start_urls:
            yield Request(url=url, callback=self.parse)

    def parse(self, response):
        yield {
            'target_url': response.url,
            'comments': response.xpath('//div[@class="comments"]//em//text()').extract(),
        }
The above is my Scrapy code. I run the spider with scrapy crawl my_spider -o comments.json.
You may notice that each of my URLs has a unique url_id associated with it. How can I associate each scraped result with its url_id? Ideally, I would like the url_id to be included in the yielded output stored in comments.json.
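A minimal sketch of one way to do this, passing url_id along in the Request's meta dict so that parse() can read it back (Scrapy's cb_kwargs argument works the same way on 1.7+); the file name, delimiter, and XPath are taken from the question, the rest is an assumption about the intended fix, not part of the original post:

    def start_requests(self):
        with open("target_urls.txt", 'r', encoding='utf-8') as f:
            for line in f:
                url_id, url = line.strip().split('\t\t')
                # Carry url_id on the request so the callback can recover it
                yield Request(url=url, callback=self.parse,
                              meta={'url_id': url_id})

    def parse(self, response):
        yield {
            'url_id': response.meta['url_id'],  # recovered from the request
            'target_url': response.url,
            'comments': response.xpath('//div[@class="comments"]//em//text()').extract(),
        }

With this change, each record written to comments.json would carry a url_id field alongside target_url and comments.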