Using a Python Crawler to Boost Blog Page Views

1. Preface:

  No overtime on the worldwide 1024 Programmer's Day, so with time on my hands I threw together a little crawler that bumps up blog page views, just for fun. Breaking 10,000 views is no big deal! Every step is clearly commented, and the code is for learning and reference only!

---- Nick.Peng


2. Required environment:

  • Python 3.x
  • Related modules: requests, json, lxml, urllib, bs4, fake_useragent
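
If you want to make sure the environment is ready before running the script, a quick import check like the one below will do. This snippet is not part of the original post; the module names simply mirror the list above (json and urllib ship with Python 3, so only the pip-installed packages are checked).

import importlib

# Try each third-party dependency and report anything that is missing.
for name in ("requests", "lxml", "bs4", "fake_useragent"):
    try:
        importlib.import_module(name)
        print("{}: OK".format(name))
    except ImportError:
        print("{}: missing, install it with pip".format(name))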

3. The code to increase blog page views is as follows:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Nick
# @Date:   2019-10-24 15:40:58
# @Last Modified by:   Nick
# @Last Modified time: 2019-10-24 16:54:31
import random
import re
import time
import urllib.request
import requests

from bs4 import BeautifulSoup
from fake_useragent import UserAgent

try:
    from lxml import etree
except Exception as e:
    import lxml.html
    # Fall back to lxml.html.etree when `from lxml import etree` fails
    etree = lxml.html.etree

# Instantiate a UserAgent object, used to generate random User-Agent strings
ua = UserAgent()


class BlogSpider(object):
    """
    Increase the number of CSDN blog visits.
    """

    def __init__(self):
        self.url = "https://blog.csdn.net/PY0312/article/list/{}"
        self.headers = {
            "Referer": "https://blog.csdn.net/PY0312/",
            "User-Agent": ua.random
        }
        self.firefoxHead = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
        self.IPRegular = r"(([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])"

    def send_request(self, num):
        """
        模擬瀏覽器發起請求
        :param num: num
        :return: html_str
        """
        html_str = requests.get(self.url.format(
            num), headers=self.headers).content.decode()
        # print(html_str)

        return html_str

    def parse_data(self, html_str):
        """
        用於解析發起請求返回的數據
        :param html_str:
        :return: each_page_urls
        """
        # Convert the returned HTML string into an element object for XPath operations
        element_obj = etree.HTML(html_str)
        # print(element_obj)

        # Get the URLs of all blog posts on this page
        each_page_urls = element_obj.xpath(
            '//*[@id="mainBox"]/main/div[2]/div/h4/a/@href')
        # print(each_page_urls)

        return each_page_urls

    def parseIPList(self, url="http://www.xicidaili.com/"):
        """
        爬取最新代理ip,來源:西刺代理
        注意:西刺代理容易被封,如遇到IP被封情況,採用以下兩種方法即可解決:
        方法一:請參考我上一篇博客《Python 實現快代理IP爬蟲》 ===> 喜歡研究的同學,可參考對接此接口
        方法二:直接屏蔽掉此接口,不使用代理也能正常使用
        :param url: "http://www.xicidaili.com/"
        :return: 代理IP列表ips
        """
        ips = []
        request = urllib.request.Request(url, headers=self.firefoxHead)
        response = urllib.request.urlopen(request)
        soup = BeautifulSoup(response, "lxml")
        tds = soup.find_all("td")
        for td in tds:
            string = str(td.string)
            if re.search(self.IPRegular, string):
                ips.append(string)
        # print(ips)
        return ips

    def main(self, total_page, loop_times, each_num):
        """
        調度方法
        :param total_page: 設置博客總頁數
        :param loop_times: 設置循環次數
        :param each_num: 設置每一頁要隨機挑選文章數
        :return:
        """
        i = 0
        # Loop for the configured number of times
        while i < loop_times:
            # Iterate to get each page number
            for j in range(total_page):
                # Build the URL of this page, send the request, and return the response
                html_str = self.send_request(j + 1)

                # Parse the response to get the URLs of all posts on this page
                each_page_urls = self.parse_data(html_str)

                # Call parseIPList to pick a random proxy IP and reduce the risk of being blocked
                # ips = self.parseIPList()
                # proxies = {"http": "{}:8080".format(
                #     ips[random.randint(0, 40)])}

                # Randomly pick each_num posts from this page
                for x in range(each_num):
                    # Visit one randomly chosen post from this page, to look less like a bot
                    current_url = random.choice(each_page_urls)
                    status = bool(requests.get(
                        current_url, headers=self.headers).content.decode())
                    print("Currently visiting: {}, status: {}".format(current_url, status))
                    time.sleep(1)   # sleep 1 second to avoid anti-crawling measures
                time.sleep(1)   # sleep 1 second between pages to avoid anti-crawling measures
            i += 1


if __name__ == '__main__':
    bs = BlogSpider()
    bs.main(7, 200, 3)  # see the main() docstring for the parameters and set them as appropriate
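
The proxy lines inside main() are commented out. If you do want to route visits through the proxies returned by parseIPList(), a minimal sketch along the lines below should work. This is only an illustration under the assumptions of the commented-out code (the ":8080" port comes from there; real proxies normally publish their own ports), not part of the original script.

# Hypothetical usage sketch: send one request through a random proxy IP.
spider = BlogSpider()
ips = spider.parseIPList()
if ips:
    proxies = {"http": "http://{}:8080".format(random.choice(ips))}
    response = requests.get("https://blog.csdn.net/PY0312/",
                            headers=spider.headers, proxies=proxies, timeout=10)
    print(response.status_code)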
