'''
爬取網址:學習猿地猿圈
爬取內容:文章標題,文章連接,作者,時間
工具:bs4,requests
結果:爬取到文件之中
'''
from bs4 import BeautifulSoup
import requests,json
# 1. Define the request headers and target URL.
url = 'https://www.lmonkey.com/t'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36"
}
alldata = []  # collected article records: title / passage_url / time / author

# 2. Fetch the listing page. A timeout prevents the script from hanging
#    forever if the server stops responding.
res = requests.get(url=url, headers=headers, timeout=10)
if res.status_code == 200:
    # 3. Parse the HTML and extract every article entry on the page.
    soup = BeautifulSoup(res.text, 'lxml')
    # Each article lives in one of these list-group <div> containers.
    divclass = soup.find_all('div', class_="list-group-item list-group-item-action p-06")
    for i in divclass:
        my_title = i.find('div', class_="topic_title mb-0 lh-180")
        # Skip containers that are not real article rows (no title div).
        if my_title:
            # First line of the title text; the surrounding markup adds
            # extra newline-separated noise after it.
            my_title = my_title.text.split("\n")[0]
            my_url = i.a["href"]          # first <a> holds the article link
            my_time = i.span['title']     # <span title="..."> holds the timestamp
            my_author = i.strong.a.text   # author name nested in <strong><a>
            print(my_author)
            print(my_url)
            print(my_time)
            print(my_title)
            alldata.append({
                'title': my_title,
                'passage_url': my_url,
                'time': my_time,
                'author': my_author,
            })
else:
    # Surface failed requests instead of silently writing an empty file.
    print('request failed with status', res.status_code)
# 4. Write the results to a file. Explicit utf-8 + ensure_ascii=False keeps
#    the Chinese text human-readable instead of \uXXXX escapes; indent makes
#    the file inspectable. (Written even on failure, matching original flow.)
with open('C:/Users/lsy/Desktop/lsy.json', 'w', encoding='utf-8') as fp:
    json.dump(alldata, fp, ensure_ascii=False, indent=2)