前面一節講過獲取所有的城市的AQI,僅僅是將獲取的AQI輸出打印,並沒有保存,這節課重點講如何將獲取的AQI保存到本地的CSV文件。
"""
作者:lanxingbudui
日期:2020-03-04
功能:AQI計算
版本:8.0
"""
import requests
from bs4 import BeautifulSoup
import csv
def get_city_aqi(city_pinyin):
    """
    Fetch the 8 AQI indicator values for one city from pm25.in.

    :param city_pinyin: pinyin slug of the city, used as the URL path
                        (e.g. 'beijing' -> http://pm25.in/beijing)
    :return: list of 8 strings, in page order: AQI, PM2.5/1h, PM10/1h,
             CO/1h, NO2/1h, O3/1h, O3/8h, SO2/1h
    """
    url = 'http://pm25.in/' + city_pinyin
    r = requests.get(url, timeout=30)  # request the city's page
    # Fail fast on HTTP errors (4xx/5xx) instead of parsing an error page.
    r.raise_for_status()
    soup = BeautifulSoup(r.text, 'lxml')
    # Each indicator sits in a <div class="span1"> block; the first 8 hold
    # the values we want. Note: find_all('div', {'class': 'span1'}) avoids
    # the class_= keyword spelling, which is easy to mistype.
    div_list = soup.find_all('div', {'class': 'span1'})
    # .text extracts the node text; .strip() removes surrounding
    # whitespace/newlines. The caption text is not needed, only the value.
    return [div.find('div', {'class': 'value'}).text.strip()
            for div in div_list[:8]]
def get_all_cities():
    """
    Scrape the pm25.in home page and return every listed city.

    :return: list of (city_name, city_pinyin) tuples, where city_pinyin
             is the link's href path with the leading '/' removed
    """
    url = 'http://pm25.in/'
    r = requests.get(url, timeout=30)  # request the home page
    soup = BeautifulSoup(r.text, 'lxml')
    # The second <div class="bottom"> on the page holds the full city link
    # list, each entry shaped like <a href="/abazhou">阿壩州</a>.
    city_div = soup.find_all('div', {'class': 'bottom'})[1]
    # link.text is the display name; link['href'][1:] strips the leading '/'.
    return [(link.text, link['href'][1:])
            for link in city_div.find_all('a')]
def main():
    """
    Fetch the AQI of every city and save the results to china_city_aqi.csv.

    Side effects: performs network requests and writes (overwrites)
    'china_city_aqi.csv' in the current working directory.
    """
    city_list = get_all_cities()
    header = ['City', 'AQI', 'PM2.5/1h', 'PM10/1h', 'CO/1h', 'NO2/1h', 'O3/1h', 'O3/8h', 'SO2/1h']
    # newline='' stops the csv module from emitting blank lines on Windows;
    # utf-8 keeps Chinese city names intact.
    with open('china_city_aqi.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for i, city in enumerate(city_list):
            # Progress report every 10 cities processed.
            if (i + 1) % 10 == 0:
                print('已處理{}條數據,共{}條數據'.format(i + 1, len(city_list)))
            city_name, city_pinyin = city
            writer.writerow([city_name] + get_city_aqi(city_pinyin))
# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()