#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Multi-threaded crawler for the qiushibaike.com "text" joke pages.

Three crawler threads pull page numbers from ``pageQueue``, download each
page's HTML and push it onto ``dataQueue``; three parser threads pop the
HTML, extract joke entries via XPath and append them as JSON lines to
``qiushi.json``.

Ported from Python 2 to Python 3 (``queue`` module, ``print()`` function,
text-mode UTF-8 file I/O instead of manual ``.encode()``), and hardened:
the bare ``except: pass`` loops now catch only the expected
``queue.Empty`` and log real errors instead of swallowing them, and the
busy-wait loops in ``main`` sleep briefly instead of spinning at 100% CPU.
"""
import json
import threading
import time
from queue import Empty, Queue

import requests           # third-party: HTTP client
from lxml import etree    # third-party: HTML / XPath parsing

# Shutdown flags polled by the worker threads; set to True from main().
CRAWL_EXIT = False  # crawl (採集) threads exit their loop when True
PARSE_EXIT = False  # parse (解析) threads exit their loop when True


class ThreadCrawl(threading.Thread):
    """Crawler thread: fetch joke-page HTML into ``dataQueue``.

    :param threadName: display name used in the start/stop log lines
    :param pageQueue:  queue of page numbers (int) still to download
    :param dataQueue:  queue receiving each downloaded page's HTML text
    """

    def __init__(self, threadName, pageQueue, dataQueue):
        super(ThreadCrawl, self).__init__()
        self.threadName = threadName
        self.pageQueue = pageQueue
        self.dataQueue = dataQueue
        self.headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}

    def run(self):
        print("啓動 " + self.threadName)
        while not CRAWL_EXIT:
            try:
                # Non-blocking get: raises queue.Empty when no page is
                # waiting, so the loop can re-check CRAWL_EXIT promptly.
                page = self.pageQueue.get(False)
            except Empty:
                continue
            url = "https://www.qiushibaike.com/text/page/" + str(page) + "/"
            try:
                content = requests.get(url, headers=self.headers).text
                time.sleep(1)  # throttle: be polite to the server
                self.dataQueue.put(content)
            except requests.RequestException as err:
                # The original silently dropped failed downloads; at least
                # record which page was lost before moving on.
                print("下載失敗 %s: %s" % (url, err))
        print("結束 " + self.threadName)


class ThreadParse(threading.Thread):
    """Parser thread: turn raw HTML from ``dataQueue`` into JSON lines.

    :param threadName: display name used in the start/stop log lines
    :param dataQueue:  queue of page HTML produced by ThreadCrawl
    :param filename:   open, shared text-mode file to append JSON lines to
    :param lock:       threading.Lock serialising writes to *filename*
    """

    def __init__(self, threadName, dataQueue, filename, lock):
        super(ThreadParse, self).__init__()
        self.threadName = threadName
        self.dataQueue = dataQueue
        self.filename = filename
        self.lock = lock

    def run(self):
        print("啓動" + self.threadName)
        while not PARSE_EXIT:
            try:
                html = self.dataQueue.get(False)
            except Empty:
                continue
            try:
                self.parse(html)
            except Exception as err:
                # A malformed page must not kill the thread, but the
                # original bare ``except: pass`` hid every error.
                print("解析失敗: %s" % err)
        print("退出" + self.threadName)

    def parse(self, html):
        """Extract every joke entry from one page and append JSON lines."""
        dom = etree.HTML(html)
        node_list = dom.xpath('//div[contains(@id, "qiushi_tag")]')
        for site in node_list:
            items = {
                "imgUrl": site.xpath('./div/a/img/@src')[0],
                "username": site.xpath('./div/a/h2')[0].text.strip(),
                "rank": site.xpath('./div/div')[0].text,
                "content": site.xpath('.//div[@class="content"]/span')[0].text.strip(),
                "vote": site.xpath('.//span[@class="stats-vote"]/i')[0].text,
                "comments": site.xpath('.//span[@class="stats-comments"]//i')[0].text,
            }
            # Serialise under the shared lock; the file is opened in text
            # mode with UTF-8 encoding, so no manual .encode() is needed.
            with self.lock:
                self.filename.write(json.dumps(items, ensure_ascii=False) + "\n")


def main():
    """Run the crawl: 20 pages, 3 crawler threads, 3 parser threads."""
    # Page numbers 1..20 to crawl.
    pageQueue = Queue(20)
    for i in range(1, 21):
        pageQueue.put(i)

    # Unbounded queue of fetched page HTML awaiting parsing.
    dataQueue = Queue()

    lock = threading.Lock()
    # Context manager guarantees the output file is closed even on error.
    with open("qiushi.json", "a", encoding="utf-8") as filename:
        # Start the three crawler threads.
        threadcrawl = []
        for threadName in ["採集線程1號", "採集線程2號", "採集線程3號"]:
            thread = ThreadCrawl(threadName, pageQueue, dataQueue)
            thread.start()
            threadcrawl.append(thread)

        # Start the three parser threads.
        threadparse = []
        for threadName in ["解析線程1號", "解析線程2號", "解析線程3號"]:
            thread = ThreadParse(threadName, dataQueue, filename, lock)
            thread.start()
            threadparse.append(thread)

        # Wait until every page number has been taken, then stop crawlers.
        while not pageQueue.empty():
            time.sleep(0.1)  # yield instead of spinning at 100% CPU
        global CRAWL_EXIT
        CRAWL_EXIT = True
        print("pageQueue爲空")
        for thread in threadcrawl:
            thread.join()
        print("1")

        # Wait until all fetched HTML has been parsed, then stop parsers.
        while not dataQueue.empty():
            time.sleep(0.1)
        global PARSE_EXIT
        PARSE_EXIT = True
        for thread in threadparse:
            thread.join()
        print("2")

    print("謝謝使用!")


if __name__ == "__main__":
    main()
多線程抓取糗事百科
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.