1. 相關網址和庫
網址
http://www.allitebooks.org/
需要用到的庫
requests、beautifulsoup4、lxml(作爲 BeautifulSoup 的解析器)
2. 代碼實現
import requests
from lxml import etree
from bs4 import BeautifulSoup
import json
import csv
class BookSpider(object):
    """Scrape book summary listings from allitebooks.com and save them to JSON/CSV."""

    def __init__(self):
        # Page-number placeholder is filled in by get_url_list().
        self.base_url = 'http://www.allitebooks.com/page/{}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 Edg/83.0.478.45'
        }
        # Accumulates one dict per book across every scraped page.
        self.data_list = []

    # 1. Build all listing-page URLs.
    def get_url_list(self):
        """Return the URLs of listing pages 1 through 5."""
        return [self.base_url.format(i) for i in range(1, 6)]

    # 2. Send the request.
    def send_request(self, url):
        """Fetch *url* with the spoofed User-Agent and return the decoded HTML text."""
        data = requests.get(url, headers=self.headers).content.decode('utf-8')
        print(url)
        return data

    # 3. Parse the data.
    def parse_bs4_data(self, data):
        """Extract name/cover URL/author/summary for every book on one page.

        Appends one dict per <article> element to ``self.data_list``.
        NOTE(review): assumes every article carries all four CSS classes;
        a missing element would raise AttributeError — confirm against the site.
        """
        bs4_data = BeautifulSoup(data, 'lxml')
        # Each <article> element is one book entry.
        for book in bs4_data.select('article'):
            book_dict = {
                'book_name': book.select_one('.entry-title').get_text(),
                'book_img_url': book.select_one('.attachment-post-thumbnail').get('src'),
                'book_author': book.select_one('.entry-author').get_text(),
                'book_info': book.select_one('.entry-summary').get_text(),
            }
            self.data_list.append(book_dict)

    # 4. Save the data.
    def save_data(self):
        """Dump the collected records to book.json.

        Fixes the original's leaked file handle (open() without close);
        utf-8 + ensure_ascii=False keep non-ASCII titles readable.
        """
        with open("book.json", 'w', encoding='utf-8') as f:
            json.dump(self.data_list, f, ensure_ascii=False)

    def save_data_csv(self):
        """Dump the collected records to book.csv: a header row, then one row per book."""
        # Guard: the original raised IndexError when no books were collected.
        if not self.data_list:
            return
        # newline='' prevents blank rows on Windows; `with` closes the handle
        # (the original leaked an open file object).
        with open("book.csv", 'w', encoding='utf-8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(self.data_list[0].keys())
            writer.writerows(data.values() for data in self.data_list)

    def run(self):
        """Crawl every listing page, parse it, then persist all results."""
        for url in self.get_url_list():
            data = self.send_request(url)
            # self.parse_xpath_data(data)
            self.parse_bs4_data(data)
        self.save_data()
        self.save_data_csv()
# Entry point: crawl, parse and persist when run as a script.
if __name__ == '__main__':
    spider = BookSpider()
    spider.run()
3.小結
爬取電子書概要信息,保存爲 json 或 csv 格式文件。
還可以進一步進入書目詳情頁,獲取電子書詳細信息及下載鏈接。