爬蟲實戰——古詩文網古詩爬取

import re
import requests

# url = 'https://www.gushiwen.org/default_1.aspx'


def parse_page(url):
    """Fetch one listing page from gushiwen.org and print every poem on it.

    Extracts each poem's title, dynasty, author and body text with regular
    expressions, strips residual HTML tags from the body, then prints one
    dict per poem followed by a separator line.

    :param url: URL of a listing page, e.g. https://www.gushiwen.org/default_1.aspx
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
    }
    # Bug fix: requests.get's second positional argument is `params`, not
    # `headers` -- the header dict must be passed as a keyword argument,
    # otherwise the User-Agent is sent as a query string instead.
    response = requests.get(url, headers=headers)
    text = response.text
    # re.DOTALL lets '.' match newlines, since each poem spans several lines.
    titles = re.findall(r'<div\sclass="cont">.*?<b>(.*?)</b>', text, re.DOTALL)
    dynasties = re.findall(r'<p\sclass="source".*?<a.*?>(.*?)</a>', text, re.DOTALL)
    authors = re.findall(r'<p\sclass="source".*?</a>.*?<a.*?>(.*?)</a>', text, re.DOTALL)
    contents = re.findall(r'<div\sclass="contson".*?>(.*?)</div>', text, re.DOTALL)
    # Strip any remaining HTML tags (e.g. <br/>) from each poem body.
    poems = [re.sub(r'<.*?>', "", content).strip() for content in contents]
    # Bug fix: `shis` used to be initialised inside the contents loop
    # (NameError on a page with no poems) and the printing loop was nested
    # inside the building loop, re-printing earlier poems for every new one.
    # Build the full list first, then print each poem exactly once.
    shis = []
    for title, dynasty, author, poem in zip(titles, dynasties, authors, poems):
        shis.append({
            'title': title,
            'dyna': dynasty,
            'author': author,
            'poem': poem
        })
    for shi in shis:
        print(shi)
        print("="*30)


def main():
    """Crawl listing pages 1 through 10 of gushiwen.org and print their poems."""
    base_url = 'https://www.gushiwen.org/default_{}.aspx'

    # Visit each of the first ten listing pages in order.
    page = 1
    while page <= 10:
        parse_page(base_url.format(page))
        page += 1


# Run the crawler only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()

正則表達式爬取古詩文網的詩詞

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章