2.6贴吧实战
from urllib import request
#请求网页页面,并返回相关内容
def loadpage(url, filename):
    """Fetch *url* and return the raw response body as bytes.

    filename is used only for the progress message; the caller passes the
    same name to writepage() afterwards.
    """
    print("正在下载内容" + filename)
    # Spoof a desktop browser UA so the site serves the normal page.
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"}
    req = request.Request(url, headers=header)
    # Return raw bytes without decoding: decoding here raised errors for the
    # original author (see old comment), and writepage() opens the file in
    # binary mode anyway. Use a context manager so the connection is closed.
    with request.urlopen(req) as response:
        return response.read()
#将爬取的数据进行存储
def writepage(html, filename):
    """Store the crawled page.

    html: raw page bytes as returned by loadpage().
    filename: path of the file to create/overwrite.
    """
    print("正在存储信息")
    # "wb": html is bytes, so the file must be opened in binary mode.
    with open(filename, "wb") as f:
        f.write(html)
    print(".........")
#理清页数,编写爬虫
def spider(url, beginpage, endpage):
    """Download Tieba pages beginpage through endpage (inclusive).

    url: base URL that ends with "pn=", so the page offset can be appended.
    beginpage, endpage: first and last page numbers to crawl.
    """
    # range() excludes its stop value, hence the +1 to include endpage.
    for page in range(beginpage, endpage + 1):
        # Tieba paginates in steps of 50 posts; page 1 is offset 0.
        offset = (page - 1) * 50
        # Build each page's URL from the unchanged base. The original code
        # reassigned `url` inside the loop, so every later page appended its
        # offset onto the previous page's URL (e.g. ...pn=150200) and fetched
        # the wrong page.
        page_url = url + str(offset)
        filename = "第" + str(page) + "页"
        html = loadpage(page_url, filename)
        writepage(html, filename)
        print("下载完成")
if __name__ == "__main__":
    # Base URL ends with "pn="; spider() appends each page's post offset.
    url = "https://tieba.baidu.com/f?kw=python&ie=utf-8&pn="
    # Crawl pages 4 and 5 of the "python" forum.
    spider(url, 4, 5)
结果产生了两个网页内容的文件