爬蟲爬取大量高清壁紙
思路:
首先從網頁入手,觀察網頁的整體結構。本段代碼使用 XPath 解析網頁(想了解 XPath 解析可以看上一篇)。
import os

import requests
from lxml import etree
class Spider:
    """Scrape high-resolution wallpapers from wallhaven.cc's toplist.

    Workflow: ``req()`` collects detail-page URLs from the toplist pages,
    then ``download()`` visits each detail page, extracts the full-size
    image URL(s) via XPath and saves them to ``save_dir``.
    """

    def __init__(self, save_dir: str = "C:\\bz", pages: int = 2):
        """Initialize scraper state.

        Args:
            save_dir: Local directory for downloaded wallpapers
                (default matches the original hard-coded ``C:\\bz``).
            pages: Number of toplist pages to crawl (default 2, same as
                the original ``range(1, 3)``).
        """
        self.toplist_image = []  # first-level URLs (detail pages)
        self.a = 0               # running counter so file names never collide
        self.file_name = ""      # path of the file currently being written
        self.save_dir = save_dir
        self.pages = pages
        # Browser-like request headers to avoid trivial anti-scraping blocks.
        # NOTE: the original dict listed "User-Agent" twice; Python keeps only
        # the last duplicate key, so the surviving value is retained here.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Accept": " text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip,deflate",
            "Accept-Language": "zh-CN,zh;q=0.8",
        }

    def req(self):
        """Fetch each toplist page and collect detail-page URLs."""
        print("正在獲取壁紙...")
        for i in range(1, self.pages + 1):  # page number drives pagination
            url = f"https://wallhaven.cc/toplist?page={i}"
            # Bug fix: headers were built but never sent; pass them now.
            result = requests.get(url, headers=self.headers).content
            html = etree.HTML(result)
            # XPath: every <a class="preview"> href is a wallpaper detail page.
            links = html.xpath('//a[@class = "preview"]/@href')
            self.toplist_image.extend(links)

    def download(self):
        """Visit each detail page and save its full-size image(s) locally."""
        # Bug fix: the target directory was never created before writing.
        os.makedirs(self.save_dir, exist_ok=True)
        for page_url in self.toplist_image:
            res = requests.get(page_url, headers=self.headers).content
            html = etree.HTML(res)
            # Second-level URLs: the full-size <img> inside div.scrollbox.
            img_urls = html.xpath('//div[@class = "scrollbox"]/img/@src')
            for img in img_urls:
                # Bug fix: name each image individually (the original set the
                # file name once per page, so multiple images overwrote it).
                self.a += 1
                self.file_name = os.path.join(self.save_dir, f"壁紙{self.a}.jpg")
                print(f"正在下載-壁紙{self.a}.jpg")
                resa = requests.get(img, headers=self.headers)
                # "with" closes the file automatically; the original's extra
                # file.close() inside the with-block was redundant.
                with open(self.file_name, mode="wb") as file:
                    file.write(resa.content)
if __name__ == "__main__":
    # Guard the entry point so importing this module does not start a crawl.
    s = Spider()
    s.req()       # collect detail-page URLs
    s.download()  # fetch and save the wallpapers