这是一个爬取壁纸的爬虫,网址:http://www.win4000.com/wallpaper_205_0_10_1.html
爬取过程:
- 1.打开网址,是套图,所以先收集套图的跳转链接,以套图的名称创建一个TXT文本
- 2.通过跳转链接,收集每套图的图片链接,并写到对应的TXT文本中
- 3.遍历文件夹下所有TXT文件,下载里面的图片,每套图分别保存到不同的文件夹下
涉及技术:
- 自定义本机header
- 使用Xpath解析网页
- 常见爬虫流程
各种网页解析方案可以看这:https://developer.51cto.com/art/201912/608581.htm,介绍正则表达式、BS4、XPath以及requests-html
1.自定义header
F12查看网页代码找到network(如果没有内容,F5刷新)
随意找一个文件,右击它,如图示,点击Copy as cURL(bash)
访问网站:https://curl.trillworks.com/
参考:https://segmentfault.com/a/1190000019926385
2.使用xpath
xpath,可以抽象成windows文件系统一样,通过路径寻找标签,推荐插件:XPath Helper(用来验证你写的语句)
找到你要的元素并复制XPath,如图所示:
插件使用如图所示:
XPath语法可以看这:https://www.w3school.com.cn/xpath/xpath_syntax.asp
3.附源码(流程都在注释里了)
# coding=utf-8
import os
import re
import sys
import time

import requests
from lxml import etree
class Reptile:
    """Crawler: collect wallpaper image URLs from win4000.com and download them.

    Workflow:
      1. ``get_url``   - scrape the gallery index page, follow each gallery's
         pages, and append every image URL to a per-gallery ``<title>.txt``
         file under ``image_url/``.
      2. ``down_image`` - read those .txt files and download each image into
         its own sub-directory under ``image/``.
    """

    # Characters that are illegal in Windows file names; gallery titles are
    # used as file names, so they must be sanitized first.
    _ILLEGAL_NAME_CHARS = r'[\\/:*?"<>|]'

    def __init__(self):
        self.base_url = "http://www.win4000.com/wallpaper_205_0_10_1.html"
        # Custom request header so the site serves us like a normal browser.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}
        self.image_url_dir = os.path.join(sys.path[0], "image_url")  # where URL list files are stored
        self.image_dir = os.path.join(sys.path[0], "image")  # where downloaded images are stored

    @staticmethod
    def _safe_name(name):
        """Return *name* with characters invalid in file names replaced by '_'."""
        return re.sub(Reptile._ILLEGAL_NAME_CHARS, "_", name)

    def make_dir(self, dir_name):
        """Create *dir_name* (including parents) if it does not already exist."""
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

    def get_html(self, URL):
        """GET *URL* with the custom header.

        Returns the ``requests.Response`` on HTTP 200, otherwise ``None``
        (non-200 status or any network error). Timeouts: connect=6.05s,
        read=30s.
        """
        try:
            resp = requests.get(url=URL, headers=self.headers, timeout=(6.05, 30))
            if resp.status_code == 200:
                return resp
        except Exception as e:
            print("地址({0})出错:{1}".format(URL, e))
        return None  # explicit: callers must handle failure

    def get_url(self):
        """Collect the image URLs of every gallery on the index page.

        Writes one URL per line into ``image_url/<sanitized title>.txt``.
        Galleries whose pages fail to load are skipped instead of crashing.
        """
        self.make_dir(self.image_url_dir)
        resp = self.get_html(self.base_url)
        if resp is None:  # index page unreachable: nothing to do
            return
        resp_html = etree.HTML(resp.text)
        jump_urls = resp_html.xpath('./body/div[4]/div/div[3]/div/div/div/div/div/ul/li/a/@href')  # gallery links
        jump_titles = resp_html.xpath('./body/div[4]/div/div[3]/div/div/div/div/div/ul/li/a/@title')  # gallery titles
        print("当前页面套图数:", len(jump_urls), len(jump_titles))
        # Visit each gallery and record every image URL it contains.
        for index, (page_url, gallery_title) in enumerate(zip(jump_urls, jump_titles)):
            # Sanitize the title: raw titles may contain characters that are
            # illegal in file names (e.g. '/', '?').
            file_name = os.path.join(self.image_url_dir, self._safe_name(gallery_title)) + ".txt"
            print("index:", index + 1, " ", page_url, gallery_title)
            with open(file_name, "a", encoding="utf-8") as output:
                while True:
                    detail = self.get_html(page_url)
                    if detail is None:  # network error: give up on this gallery
                        break
                    detail_html = etree.HTML(detail.text)
                    # Title of the image on the current page; used to detect
                    # when pagination has crossed into the next gallery.
                    page_titles = detail_html.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/img/@title')
                    if not page_titles or page_titles[0] != gallery_title:
                        break  # left the current gallery (or unexpected page layout)
                    next_urls = detail_html.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/@href')
                    images = detail_html.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/img/@src')
                    if images:
                        output.write(images[0])
                        output.write("\n")
                    if not next_urls:  # no "next image" link: gallery finished
                        break
                    page_url = next_urls[0]

    def down_image(self):
        """Download every image listed in the .txt files under ``image_url/``.

        Each gallery gets its own sub-directory under ``image/``; images are
        numbered 1.jpg, 2.jpg, ... Failed downloads are skipped.
        """
        if not os.path.exists(self.image_url_dir):
            print("image_url文件夹不存在")
            return
        files = os.listdir(self.image_url_dir)
        if len(files) == 0:
            print("image_url文件夹内不存在文件")
            return
        self.make_dir(self.image_dir)  # root folder for all downloaded images
        for file in files:
            gallery = os.path.splitext(file)[0]  # strip the .txt extension
            image_dir = os.path.join(self.image_dir, gallery)
            self.make_dir(image_dir)  # one folder per gallery
            with open(os.path.join(self.image_url_dir, file), "r", encoding="utf-8") as url_file:
                rows = url_file.readlines()
            for index, row in enumerate(rows, start=1):
                url = row.strip()  # robust against a missing trailing newline
                if not url:
                    continue  # skip blank lines
                print("正在下载:{}第{}张图片……".format(gallery, index))
                image = self.get_html(url)
                if image is None:  # download failed: skip instead of crashing
                    continue
                image_name = os.path.join(image_dir, str(index) + ".jpg")
                with open(image_name, "wb") as output:
                    output.write(image.content)
if __name__ == '__main__':
    reptile = Reptile()

    # Step 1: collect every gallery's image links into per-gallery text files.
    start = time.time()
    reptile.get_url()
    print(f"get_url used time:{time.time() - start}")
    # measured once: get_url used time:101.54531002044678

    # Step 2: download the images listed in those text files.
    start = time.time()
    reptile.down_image()
    print(f"down_image used time:{time.time() - start}")
    # measured once: down_image used time:604.7457594871521