网络爬虫(Web Spider),又被称为网页蜘蛛,是一种按照一定的规则,自动地抓取网站信息的程序或者脚本。
爬虫流程:
1,先由 urllib 的 request 打开 Url 得到网页 html 文档
2,浏览器打开网页源代码分析元素节点
3,通过 Beautiful Soup 或正则表达式提取想要的数据
4,存储数据到本地磁盘或数据库(抓取,分析,存储)
下面以爬取 笔趣网 里面的小说为例:
整个逻辑我这里倒着来讲述;
下图是 一篇小说某个章节的正文内容,我们用 Chrome 浏览器打开后,右键 -> 检查 ,再按照下图操作;
从上图可以看出,一篇小说某个章节的正文在 <div class='showtxt'> 的节点里面,知道这个规律之后,我们就可以开始码代码了;
def getContent(target):
    """Fetch one chapter page and return its body text.

    target: URL of a chapter page whose body sits in <div class='showtxt'>.
    Returns the chapter text with paragraph breaks normalised.
    """
    req = requests.get(url=target)
    # Re-encode to bytes so BeautifulSoup can sniff the charset itself.
    # Fall back to utf-8: req.encoding is None when the server sends no
    # charset header, and str.encode(None, ...) raises TypeError.
    html = req.text.encode(req.encoding or 'utf-8', 'ignore')
    bf = BeautifulSoup(html, "lxml")
    # All <div class='showtxt'> nodes; the first one holds the chapter body.
    texts = bf.find_all('div', class_='showtxt')
    # Each paragraph starts with 8 non-breaking spaces; turn them into
    # blank-line paragraph breaks.
    return texts[0].text.replace('\xa0' * 8, '\n\n')
上面这段代码是拿到某个章节的内容, 接下来看看 一篇小说的所有章节怎么拿到?
上图是一篇小说的所有章节目录,从上面这个规律可以发现,章节链接就是 <div class='listmain'> 里面的所有 <a> 标签;
def getZjUrl(bookurl):
    """Return [(chapter_title, chapter_url), ...] for one book.

    bookurl: URL of the book's table-of-contents page; chapter links are
    the <a> tags inside <div class='listmain'>.
    """
    server = 'http://www.biqukan.com/'
    req = requests.get(url=bookurl)
    # Fall back to utf-8: req.encoding is None when the server sends no
    # charset header, and str.encode(None, ...) raises TypeError.
    html = req.text.encode(req.encoding or 'utf-8', 'ignore')
    div_bf = BeautifulSoup(html, "lxml")
    # First <div class='listmain'> node holds the chapter index.
    div = div_bf.find_all('div', class_='listmain')
    # Search the node directly -- no need to re-parse str(div[0]) with a
    # second BeautifulSoup pass as the original did.
    # Hrefs are site-relative, so prefix the server root.
    return [(each.string, server + each.get('href'))
            for each in div[0].find_all('a')]
最后就是把小说内容写到硬盘里面;
def writer(name, path, text):
    """Append one chapter (title line + body + blank separator) to *path*.

    name: chapter title, written on its own line before the body.
    path: output text file, opened in append mode as UTF-8.
    text: chapter body string.
    """
    # 'with' guarantees the handle is flushed and closed even on error;
    # the original opened the file and never closed it (handle leak).
    with open(path, 'a', encoding='utf-8') as f:
        f.write(name + '\n')
        f.writelines(text)
        f.write('\n\n')
最后附上 完整的代码;
注意:完整的代码是 抓取一个作者的所有小说到本地硬盘的,所以很慢,建议别直接运行
from bs4 import BeautifulSoup
import requests, sys, os, multiprocessing, threading
def getBookUrl(author):
    """Search the site for *author* and return [(title, url, author), ...].

    author: author name used as the search query.
    Returns one tuple per search hit; the third element is the hit's
    author string so callers can filter exact matches.
    """
    target = ('https://so.biqusoso.com/s.php'
              '?ie=utf-8&siteid=biqukan.com&q=' + author)
    req = requests.get(url=target)
    # Fall back to utf-8: req.encoding is None when the server sends no
    # charset header, and str.encode(None, ...) raises TypeError.
    html = req.text.encode(req.encoding or 'utf-8', 'ignore')
    soup = BeautifulSoup(html, "lxml")
    # First <div class='search-list'> node holds the result table; search
    # it directly instead of re-parsing str(div[0]) as the original did.
    results = soup.find_all('div', class_='search-list')[0]
    links = results.find_all('a')
    # The first 's4' span is the table's header cell, so the author for
    # links[i] lives at authors[i + 1] -- hence the off-by-one below.
    authors = results.find_all('span', class_='s4')
    return [(a.string, a.get('href'), authors[i + 1].string)
            for i, a in enumerate(links)]
def getZjUrl(bookurl):
    """Return [(chapter_title, chapter_url), ...] for one book.

    bookurl: URL of the book's table-of-contents page; chapter links are
    the <a> tags inside <div class='listmain'>.
    """
    server = 'http://www.biqukan.com/'
    req = requests.get(url=bookurl)
    # Fall back to utf-8: req.encoding is None when the server sends no
    # charset header, and str.encode(None, ...) raises TypeError.
    html = req.text.encode(req.encoding or 'utf-8', 'ignore')
    div_bf = BeautifulSoup(html, "lxml")
    # First <div class='listmain'> node holds the chapter index.
    div = div_bf.find_all('div', class_='listmain')
    # Search the node directly -- no need to re-parse str(div[0]) with a
    # second BeautifulSoup pass as the original did.
    # Hrefs are site-relative, so prefix the server root.
    return [(each.string, server + each.get('href'))
            for each in div[0].find_all('a')]
def getContent(target):
    """Fetch one chapter page and return its body text.

    target: URL of a chapter page whose body sits in <div class='showtxt'>.
    Returns the chapter text with paragraph breaks normalised.
    """
    req = requests.get(url=target)
    # Re-encode to bytes so BeautifulSoup can sniff the charset itself.
    # Fall back to utf-8: req.encoding is None when the server sends no
    # charset header, and str.encode(None, ...) raises TypeError.
    html = req.text.encode(req.encoding or 'utf-8', 'ignore')
    bf = BeautifulSoup(html, "lxml")
    # All <div class='showtxt'> nodes; the first one holds the chapter body.
    texts = bf.find_all('div', class_='showtxt')
    # Each paragraph starts with 8 non-breaking spaces; turn them into
    # blank-line paragraph breaks.
    return texts[0].text.replace('\xa0' * 8, '\n\n')
def writer(name, path, text):
    """Append one chapter (title line + body + blank separator) to *path*.

    name: chapter title, written on its own line before the body.
    path: output text file, opened in append mode as UTF-8.
    text: chapter body string.
    """
    # 'with' guarantees the handle is flushed and closed even on error;
    # the original opened the file and never closed it (handle leak).
    with open(path, 'a', encoding='utf-8') as f:
        f.write(name + '\n')
        f.writelines(text)
        f.write('\n\n')
# book = (title, url, author) as returned by getBookUrl()
def saveOneBook(book, qauthor, skip=12):
    """Download every chapter of *book* into '<qauthor>/<title>.txt'.

    book:    (title, url, author) tuple from getBookUrl().
    qauthor: only books whose author matches exactly are downloaded;
             also used as the output directory name.
    skip:    number of leading index entries to ignore -- the site lists
             the newest chapters ahead of the real table of contents.
             Defaults to 12, the original hard-coded value.

    Books whose output file already exists are skipped.
    """
    if book[2] != qauthor:
        return
    # exist_ok avoids the FileExistsError race the original os.mkdir hit
    # when several downloader threads created the directory at once.
    os.makedirs(qauthor, exist_ok=True)
    zjlist = getZjUrl(book[1])
    filename = qauthor + '/' + book[0] + '.txt'
    isFileExist = os.path.exists(filename)
    print(book[0] + str(isFileExist))
    if isFileExist:
        return
    print(filename + ' 开始下载:')
    for zjIndex in range(skip, len(zjlist)):
        title, url = zjlist[zjIndex]
        writer(title, filename, getContent(url))
        # Carriage return keeps the progress line updating in place.
        sys.stdout.write(book[0] + " 已下载:%d%%"
                         % int(zjIndex * 100 / len(zjlist)) + '\r')
        sys.stdout.flush()
    print(filename + ' 下载完成')
def saveAllBookBy(qauthor):
    """Download every search hit for *qauthor* via saveOneBook."""
    for book in getBookUrl(qauthor):
        saveOneBook(book, qauthor)
# Other authors worth trying: '我吃西红柿', 'zhttty', '耳根', '天蚕土豆',
# '忘语', '唐家三少', '辰东', '鱼人二代', '蝴蝶蓝', '萧鼎', '跳舞', '猫腻',
# '烟雨江南', '梦入神机', '发飙的蜗牛'
# Downloading is dominated by network I/O, so one thread per book lets
# the waits overlap.
if __name__ == "__main__":
    for qauthor in ['萧鼎']:
        for book in getBookUrl(qauthor):
            threading.Thread(target=saveOneBook, args=(book, qauthor)).start()