import json
import re
import requests
from requests.exceptions import RequestException
# Fetch the raw HTML of a single ranking page.
def get_one_page(url):
    """Download *url* and return its HTML text, or None on any failure.

    Returns None both when the request raises a RequestException and when
    the server answers with a non-200 status code, so callers only need a
    single None check.
    """
    # Browser-like User-Agent; many sites reject the default requests UA.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.108 Safari/537.36 2345Explorer/8.7.0.16013'
    }
    try:
        # timeout keeps the crawler from hanging forever on a dead server
        # (requests has no default timeout).
        response = requests.get(url, headers=headers, timeout=10)
        # Only a 200 response counts as a successful fetch.
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None
# Parse one ranking page's HTML into movie records.
def parse_one_page(html):
    """Yield one dict per movie entry found in *html*.

    Each dict has string values under the keys:
    index, image, title, actor, time, score.
    """
    # Raw strings avoid the invalid-escape warning for \d on modern Python;
    # re.S lets '.' span newlines since each <dd> block covers several lines.
    pattern = re.compile(
        r'<dd>.*?board-index.*?">(\d+)</i>.*?data-src="(.*?)".*?</a>.*?name"><a.*?>(.*?)</a>'
        r'.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    # Use the compiled pattern directly rather than re-passing it to re.findall.
    for item in pattern.findall(html):
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            # Slice off the 3-character '主演:' prefix after stripping whitespace.
            'actor': item[3].strip()[3:],
            # Slice off the 5-character '上映时间:' prefix.
            'time': item[4].strip()[5:],
            # Score arrives split into integer ('9.') and fraction ('5') parts.
            'score': item[5] + item[6]
        }
# Append one record to the results file as a JSON line.
def write_to_file(content, path='result.txt'):
    """Serialize *content* as one JSON line appended to *path*.

    ensure_ascii=False keeps Chinese characters readable in the file;
    append mode ('a') accumulates records across calls instead of
    overwriting. *path* defaults to the original hard-coded 'result.txt'.
    """
    with open(path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
    # NOTE: the original also called f.close() inside the with-block;
    # that is redundant — the context manager closes the file.
# Download one image to disk given its URL.
def download(image_url, pathname):
    """Fetch *image_url* and write the response body to *pathname*.

    Opens the file in 'wb' (not the original 'ab'): each call targets its
    own path, and append mode would concatenate bytes onto an existing
    file on re-runs, corrupting the image.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.108 Safari/537.36 2345Explorer/8.7.0.16013'
    }
    # timeout so a single stuck image cannot stall the whole crawl
    response = requests.get(image_url, headers=headers, timeout=10)
    # TODO(review): a non-200 response would write an error page as .jpg;
    # consider checking response.status_code before saving.
    with open(pathname, 'wb') as f:
        f.write(response.content)  # binary payload
# Entry point for one page: the page's position is selected via the
# `offset` query parameter of the ranking URL.
def main(offset):
    """Crawl one ranking page: print, persist, and download images.

    *offset* is the board's paging offset (0, 10, 20, ...). Silently
    returns when the page could not be fetched.
    """
    # Pass offset through as a URL query parameter.
    url = "http://maoyan.com/board/4?offset=" + str(offset)
    html = get_one_page(url)
    if html is None:
        # Fetch failed (network error or non-200); parsing None would
        # raise a TypeError, so skip this page.
        return
    count = 0  # number of images downloaded from this page
    for item in parse_one_page(html):
        print(item)
        # Persist the record to the txt file.
        write_to_file(item)
        # Number images by offset + position so successive pages do not
        # overwrite each other (the original restarted at 1 per page).
        pathname = "D:\\spider\\image\\" + str(offset + count + 1) + ".jpg"
        download(item['image'], pathname)
        count = count + 1
if __name__=='__main__':
#讀取10頁前100個電影,構造0-90的循環
for i in range(10):