Baidu Tieba Crawler (Case Exercise: GET Requests)

#!/usr/bin/env python
# -*- coding: utf-8 -*-


import urllib.request
import urllib.parse
import ssl


get_url = 'http://tieba.baidu.com/f?kw=%s&ie=utf-8&pn=%d'
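
# Example expansion for the keyword '人工智能' and page 0; the percent-encoded
# value matches the one in the Referer header below:
# http://tieba.baidu.com/f?kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&ie=utf-8&pn=0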

# Disable certificate verification globally
ssl._create_default_https_context = ssl._create_unverified_context
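
# A narrower alternative (a sketch, not part of the original script): build an
# unverified context once and pass it per request instead of patching the
# module-wide default:
# ctx = ssl._create_unverified_context()
# response = urllib.request.urlopen(request, context=ctx)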

headers = {
	# GET /f?ie=utf-8&kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&fr=search HTTP/1.1
    'Host': 'tieba.baidu.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Referer': 'http://tieba.baidu.com/f?ie=utf-8&kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&fr=search',
    # As a rule, leave this header out; otherwise decoding the response page raises an error!!!
    # 'Accept-Encoding':'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cookie': 'TIEBA_USERTYPE=16f2e80db0d2749d6d8940be; TIEBAUID=cb23caae14130a0d384a57f1; BAIDUID=5825D3624FFD2FF79AD102CCE35CF40D:FG=1; PSTM=1532620217; BIDUPSID=4CA0F78CD45B4F46C5E80CFE8C9EB708; bdshare_firstime=1534381675497; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=26524_1434_21122_26350; delPer=0; PSINO=1; Hm_lvt_98b9d8c2fd6608d564bf2ac2ae642948=1540133767,1540133797,1540202629,1540954230; wise_device=0; Hm_lpvt_98b9d8c2fd6608d564bf2ac2ae642948=1540966604',
    'Connection': 'keep-alive'
}


def load_url_data(search_keywords, num):
    """
    作用:发送请求,获得响应
    :param search_keywords: 要查询的关键字,
    :param num: 要爬取的页码数,
    :return: response
    """

    for i in range(num):
        # Tieba's pn query parameter advances by 50 per page, so page i maps to pn = i * 50.
        full_get_url = get_url % (search_keywords, i * 50)
        request = urllib.request.Request(url=full_get_url, headers=headers)
        response = urllib.request.urlopen(request)

        # read() can only be called once; a second call returns nothing.
        # Approach 1:
        # content = response.read().decode("utf-8")  # fails!
        # UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte
        # Cause: 'Accept-Encoding': 'gzip, deflate' was added to headers; remove it and this works.
        # Approach 2:
        # with 'Accept-Encoding': 'gzip, deflate' still in headers, the line below runs, but the output is garbled!!!
        # content = response.read().decode("utf-8", errors="replace")
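
        # A hedged sketch of how Approach 2's garbling could be avoided if
        # 'Accept-Encoding': 'gzip, deflate' were kept (an assumption, not the
        # author's method): check the Content-Encoding response header and
        # decompress with the standard-library gzip module before decoding.
        # import gzip
        # raw = response.read()
        # if response.headers.get('Content-Encoding') == 'gzip':
        #     raw = gzip.decompress(raw)
        # content = raw.decode('utf-8')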

        content = response.read().decode("utf-8")
        write_to_file(content, i)


def write_to_file(content, i):
    """
    作用:将响应的页面保存在本地
    :param content: 服务器返回的响应页面,
    :param i: 变量
    :return: html页面
    """

    # open() takes the file path/name, a mode, and the file's encoding; with
    # mode='wb' no encoding is given, so the str is encoded to bytes explicitly.
    with open("./tieba_page_%d.html" % (i + 1), mode='wb') as fp:
        fp.write(content.encode('utf-8'))
        print("Tieba page %d saved successfully" % (i + 1))


if __name__ == '__main__':

    search_keywords = input("Enter the keyword to search for: ")

    # Approach 1: fails
    # search_keywords = urllib.parse.urlencode(search_keywords).encode('utf-8')
    # TypeError: not a valid non-string sequence or mapping object
    # Cause: urllib.parse.urlencode().encode() is for encoding multiple key/value
    # pairs (a mapping or sequence of pairs); urlencode() is the tool for a POST url.

    # Approach 2: OK
    # For a single term, use urllib.parse.quote(); quote()/unquote() are the tools for a GET url.
    search_keywords = urllib.parse.quote(search_keywords)
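
    # Illustration of the difference, using the keyword '人工智能' from the
    # Referer header above (standard percent-encodings):
    # urllib.parse.quote('人工智能')             -> '%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD'
    # urllib.parse.urlencode({'kw': '人工智能'}) -> 'kw=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD'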

    # Keep prompting until a valid integer is entered.
    while True:
        try:
            num = int(input("Enter how many pages to crawl: "))
            break
        except ValueError:
            print("Please enter a numeric value.")

    load_url_data(search_keywords, num)

 
