一. 要求
爬取糗事百科段子,假設頁面的URL是 http://www.qiushibaike.com/8hr/page/1
-
使用requests獲取頁面信息,用XPath / re 做數據提取
-
獲取每個帖子裏的用戶頭像鏈接、用戶姓名、段子內容、點贊次數和評論次數
-
保存到 json 文件內
二. 參考代碼
#coding=utf-8
import requests
from retrying import retry
from lxml import etree
class Qiubai_spider():
    """Spider for qiushibaike.com jokes.

    Fetches one listing page, extracts per-post fields (avatar URL, user
    name, joke content, vote count, comment count) and saves them to a
    JSON file — per the requirement "保存到 json 文件內", which the
    original code never implemented (it only printed the fields).
    """

    def __init__(self):
        # URL template; .format(page_number) yields a concrete page URL.
        self.url = "http://www.qiushibaike.com/8hr/page/{}/"
        # Minimal desktop User-Agent so the site serves the normal HTML layout.
        self.headers = {
            "User-Agent":"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1 Trident/5.0;"
        }

    @retry(stop_max_attempt_number=5)  # re-request up to 5 times on failure
    def parse_url(self, url):
        """GET *url* and return the lxml-parsed HTML tree.

        Raises ValueError on a non-200 status so that @retry re-requests.
        NOTE: the original used `assert`, which is stripped under
        `python -O` and would silently disable the retry logic.
        """
        response = requests.get(url, timeout=10, headers=self.headers)
        if response.status_code != 200:
            raise ValueError("bad status code: {}".format(response.status_code))
        print(url)
        return etree.HTML(response.text)

    def parse_content(self, html):
        """Extract every post from *html*; return a list of item dicts.

        Each dict has keys: avatar, name, content, star_number,
        comment_number.  Missing fields become None instead of raising
        IndexError (the original only guarded the avatar xpath; any other
        missing node aborted the whole page).
        """
        def first(nodes):
            # One guard for every xpath result: empty node-set -> None.
            return nodes[0] if nodes else None

        item_temp = html.xpath("//div[@class='article block untagged mb15']")
        print(len(item_temp))
        items = []
        for item in item_temp:
            # User avatar; the page emits scheme-relative URLs ("//..."),
            # so prepend "http:" to make them absolute.
            avatar = first(item.xpath("./div[1]/a[1]/img/@src"))
            if avatar is not None and not avatar.startswith("http:"):
                avatar = "http:" + avatar
            print(avatar)
            name = first(item.xpath("./div[1]/a[2]/h2/text()"))  # user name
            print(name)
            content = first(item.xpath("./a[@class='contentHerf']/div/span/text()"))  # joke text
            print(content)
            star_number = first(item.xpath("./div[@class='stats']/span[1]/i/text()"))  # vote count
            print(star_number)
            comment_number = first(item.xpath("./div[@class='stats']/span[2]/a/i/text()"))  # comment count
            print(comment_number)
            print("*" * 100)
            items.append({
                "avatar": avatar,
                "name": name,
                "content": content,
                "star_number": star_number,
                "comment_number": comment_number,
            })
        return items

    def save_content(self, items):
        """Write *items* to qiushibaike.json (implements the JSON-save requirement)."""
        import json  # local import keeps the class self-contained
        with open("qiushibaike.json", "w", encoding="utf-8") as f:
            json.dump(items, f, ensure_ascii=False, indent=2)

    def run(self):
        """Main logic: build the URL, fetch the page, parse it, persist items."""
        url = self.url.format(1)          # page-1 URL
        html = self.parse_url(url)        # request and parse the page
        items = self.parse_content(html)  # extract the posts
        self.save_content(items)          # save to the JSON file
if __name__ == "__main__":
    # Script entry point: build a spider instance and crawl page 1.
    spider = Qiubai_spider()
    spider.run()