爬蟲學習筆記(九)數據解析——bs4 2020.5.8

前言

本節學習數據解析的beautiful soup
(原文此處有配圖)

1、基本使用

安裝

pip install beautifulsoup4

簡單調用解析庫

from bs4 import BeautifulSoup

# Sample HTML document used to demonstrate BeautifulSoup parsing.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

# 1. Convert the raw markup into a BeautifulSoup object. Naming the
#    parser explicitly ('lxml') avoids the warning bs4 emits when it
#    has to guess which system parser to use.
soup = BeautifulSoup(html_doc, 'lxml')

# 2. Pretty-print the parsed tree (indents and completes the tags).
print(soup.prettify())

2、四大對象

from bs4 import BeautifulSoup

# Demo HTML covering bs4's four object types:
# BeautifulSoup, Tag, Comment and NavigableString.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="story"><!--...--></p> #註釋
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
"""

# 1. Convert -- the soup itself is of type bs4.BeautifulSoup.
soup = BeautifulSoup(html_doc, 'lxml')
print(type(soup))

# 2. Parse the data.
# Tag object: bs4.element.Tag (attribute access returns the FIRST match).
head_tag = soup.head

# .string of the first <p>, whose only child is a comment,
# is of type bs4.element.Comment.
comment = soup.p.string
print(type(comment))
print(comment)

first_link = soup.a
print(first_link)

# Text content: bs4.element.NavigableString.
print(first_link.string)

# Tag attributes are read with item access.
print(first_link['href'])

這些都只能取第一個

3、多點解析

from bs4 import BeautifulSoup

# Demo HTML for the search/selector APIs.
html_doc = """
<html><head>
<title id="one">The Dormouse's story</title>
</head>
<body>
<p class="story"><!--...--></p>
<p class="title">
    p標籤的內容
    <b>The Dormouse's story</b>
</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
"""
# 1. Convert to a bs4.BeautifulSoup object.
soup = BeautifulSoup(html_doc, 'lxml')

# 2. Generic search methods.
# find -- returns the FIRST tag object matching the query.
result = soup.find(name="p")
print(result)  # prints the first <p>

# BUGFIX: was attrs={"class": "sisiter"} (typo), which matched nothing
# and always printed None; "sister" matches the <a> links as intended.
result = soup.find(attrs={"class": "sister"})
print(result)  # prints the first element with class="sister"

result = soup.find(text="Tillie")
print(result)  # matches an exact text node, so it echoes the query back

result = soup.find(
    name='p',
    attrs={"class": "story"},
)
print(result)

# find_all -- returns a list of tag objects.
result = soup.find_all('a')
print(result)  # list of every <a>
result = soup.find_all("a", limit=1)[0]
print(result)  # this is exactly what find() does internally
result = soup.find_all(attrs={"class": "sister"})
print(result)  # every element with class="sister"

# select_one -- CSS selector, first match.
result = soup.select_one('.sister')
print(result)

# select -- CSS selector, returns a list.
result = soup.select('.sister')
print(result)  # class selector
result = soup.select('#one')
print(result)  # id selector
result = soup.select('head title')
print(result)  # descendant selector
result = soup.select('title,.title')
print(result)  # grouped selectors
result = soup.select('a[id="link3"]')
print(result)  # attribute selector

# Text wrapped by a tag.
result = soup.select('.title')[0].get_text()
print(result)

# A tag's attribute value.
result = soup.select('#link1')[0].get('href')
print(result)

4、例子

import requests
from bs4 import BeautifulSoup
from lxml import etree
import json
class BtcSpider(object):
    """Crawl the 8btc forum: list pages -> detail pages -> JSON files."""

    def __init__(self):
        # {} is the page-number placeholder filled in by str.format().
        self.url = 'http://8btc.com/forum-61-{}.html'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
            }
        # Accumulated results from the list pages and the detail pages.
        self.data_list = []
        self.data_detail = []

    # 1. Send the request. Return raw bytes: decoding here can garble
    #    pages whose declared charset differs from the actual encoding.
    def get_response(self, url):
        response = requests.get(url, headers=self.headers)
        return response.content

    # 2. Parse a list page: collect title + detail URL for every thread.
    def parse_list_data(self, data):
        soup = BeautifulSoup(data, 'lxml')
        # Equivalent xpath experiments, kept for reference:
        """
        html_data = etree.HTML(data)
        result_list = html_data.xpath('//div[contains(@id,"stickthread")]')  # fuzzy id match
        result_list = html_data.xpath('//head/following-sibling::*[1]')  # next sibling tag
        print(len(result_list))
        print(result_list)
        """
        for title in soup.select('.xst'):
            self.data_list.append({
                'title': title.get_text(),
                'detail_url': title.get('href'),
            })

    # 3. Parse a detail page: the question plus every answer.
    def parse_detail_data(self, data):
        html_data = BeautifulSoup(data, 'lxml')
        question = html_data.select('#thread_subject')[0].get_text()
        print(question)
        # BUGFIX: the answer list was re-created INSIDE the loop, so only
        # the last answer survived; build it once over all matches.
        answers = [answer.get_text() for answer in html_data.select('.t_f')]
        self.data_detail.append({
            "question": question,
            "answer": answers,
        })

    # 4. Persist data as JSON. UTF-8 plus ensure_ascii=False keeps the
    #    Chinese text human-readable in the output file.
    def save_data(self, data, file_path):
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False))

    # 5. Entry point: crawl the list pages, then every detail page.
    def start(self):
        for i in range(1, 2):
            # BUGFIX: was self.url.format(1) -- every iteration fetched
            # page 1; format(i) actually paginates.
            url = self.url.format(i)
            data = self.get_response(url)
            self.parse_list_data(data)
        self.save_data(self.data_list, "04list.json")
        # Request each detail page found on the list pages.
        for item in self.data_list:
            detail_data = self.get_response(item['detail_url'])
            self.parse_detail_data(detail_data)
        self.save_data(self.data_detail, 'detail.json')
BtcSpider().start()

結語

bs4就是找選擇器,比前兩個要簡單點
但解析效率還是正則表達式更快

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章