一、爬蟲任務
爬取網站的標題
步驟:
1、從代理ip池中隨機選擇一個,創建ProxyHandler->創建opener
2、隨機選擇User-Agent,創建請求對象(模擬不同瀏覽器)
3、發送請求
4、正則清洗數據
二、python腳本
import random
import re
from urllib import request
# Target URL to scrape.
url = "http://www.baidu.com"

# Proxy pool; one entry is chosen at random per request.
# NOTE(review): these public proxy IPs are likely stale — verify before relying on them.
proxylist = [{"http": "120.194.18.90:81"}, {"http": "39.137.168.229:80"}]

# User-Agent pool: a random one is sent per request to simulate different browsers.
agent1 = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)"
agent2 = "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.142 Safari/535.19"
agent3 = "Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/17.0 "
agent4 = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2"
agent5 = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5"
agentlist = [agent1, agent2, agent3, agent4, agent5]


def fetch_title(target_url, timeout=10):
    """Fetch *target_url* through a random proxy with a random User-Agent
    and return the list of <title> contents found in the response.

    Args:
        target_url: URL to request.
        timeout: socket timeout in seconds; prevents hanging on a dead proxy.

    Returns:
        List of strings captured by the <title>(.*?)</title> pattern
        (empty list if the page has no title).

    Raises:
        urllib.error.URLError: on network/proxy failure or timeout.
    """
    proxy = random.choice(proxylist)
    print(proxy)
    agent = random.choice(agentlist)
    print(agent)
    headers = {"User-Agent": agent}
    # ProxyHandler routes the request through the chosen proxy;
    # build_opener produces an opener that applies it.
    opener = request.build_opener(request.ProxyHandler(proxy))
    req = request.Request(target_url, headers=headers)
    # Explicit timeout: the original call could block forever on a dead proxy.
    response = opener.open(req, timeout=timeout).read().decode()
    # Extract the title with a regex; re.S lets it match across line breaks.
    pat = r'<title>(.*?)</title>'
    return re.findall(pat, response, re.S)


if __name__ == "__main__":
    data = fetch_title(url)
    # Guard against a missing <title>: the original data[0] raised IndexError
    # whenever the regex found nothing.
    if data:
        print(data[0])
    else:
        print("no <title> found in response")