# Baidu Tieba spider (practice example: GET request)

#!/usr/bin/env python
# -*- coding: utf-8 -*-


import urllib.request
import urllib.parse
import ssl


# URL template: %s = URL-quoted search keyword, %d = pagination offset (pn).
get_url = 'http://tieba.baidu.com/f?kw=%s&ie=utf-8&pn=%d'

# Globally disable SSL certificate verification for this process.
# NOTE(review): affects every HTTPS request made by this interpreter —
# acceptable for a scraping exercise, unsafe for production code.
ssl._create_default_https_context = ssl._create_unverified_context

# Request headers copied from a real browser session, mirroring:
# GET /f?ie=utf-8&kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&fr=search HTTP/1.1
headers = {
    'Host':	'tieba.baidu.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Referer': 'http://tieba.baidu.com/f?ie=utf-8&kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&fr=search',
    # Deliberately NOT sending Accept-Encoding: with it the server returns a
    # gzip-compressed body and decode("utf-8") fails with a UnicodeDecodeError.
    # 'Accept-Encoding':'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cookie': 'TIEBA_USERTYPE=16f2e80db0d2749d6d8940be; TIEBAUID=cb23caae14130a0d384a57f1; BAIDUID=5825D3624FFD2FF79AD102CCE35CF40D:FG=1; PSTM=1532620217; BIDUPSID=4CA0F78CD45B4F46C5E80CFE8C9EB708; bdshare_firstime=1534381675497; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=26524_1434_21122_26350; delPer=0; PSINO=1; Hm_lvt_98b9d8c2fd6608d564bf2ac2ae642948=1540133767,1540133797,1540202629,1540954230; wise_device=0; Hm_lpvt_98b9d8c2fd6608d564bf2ac2ae642948=1540966604',
    'Connection':'keep-alive'
}


def load_url_data(search_keywords, num):
    """
    Fetch each result page for a keyword and save the responses locally.

    :param search_keywords: URL-quoted keyword to search for
    :param num: number of pages to crawl
    :return: None; each page is written to disk via write_to_file()
    """
    for page in range(num):
        # Tieba paginates in steps of 50 posts (pn=0, 50, 100, ...).
        full_get_url = get_url % (search_keywords, page * 50)
        request = urllib.request.Request(url=full_get_url, headers=headers)
        # Context manager guarantees the connection is closed even if
        # read()/decode() raises (the original leaked the response object).
        with urllib.request.urlopen(request) as response:
            # read() consumes the body and can only be called once.
            # Decoding as utf-8 works because the headers deliberately omit
            # 'Accept-Encoding: gzip, deflate' (see the headers dict).
            content = response.read().decode("utf-8")
        write_to_file(content, page)


def write_to_file(content, i):
    """
    Persist one response page as a local HTML file.

    :param content: decoded response body (str)
    :param i: zero-based page index; the file name uses the 1-based number
    :return: None
    """
    page_no = i + 1
    file_name = "./貼吧第%d頁.html" % page_no
    # Binary mode ('wb') needs no encoding argument on open(); the text is
    # encoded explicitly when written.
    with open(file_name, mode='wb') as fp:
        fp.write(content.encode('utf-8'))
        print("貼吧第%d頁保存成功" % page_no)


if __name__ == '__main__':

    search_keywords = input("請輸入要查詢的關鍵字:")

    # quote() percent-encodes a single term for use inside a GET URL.
    # (urlencode() expects a mapping/sequence of pairs and is the right tool
    # for POST bodies — calling it on a plain string raises TypeError.)
    search_keywords = urllib.parse.quote(search_keywords)

    # Keep prompting until a valid integer is entered. The original caught
    # a single failure and crashed if the retry was also non-numeric.
    while True:
        try:
            num = int(input("請輸入要爬取多少頁:"))
            break
        except ValueError:
            print("請輸入數字型數據:")

    load_url_data(search_keywords, num)

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章