Web Scraping Practice (4) -- The IP Address Problem

Preface:

Most websites inspect the headers of an incoming request to judge whether it was made by a real person (a browser) or by a script.

Reason:

The request headers that Python's requests library sends by default look like this:


Host: 127.0.0.1:5000
User-Agent: python-requests/2.21.0
Accept-Encoding: gzip, deflate
Accept: */*
Connection: keep-alive
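You can see these defaults for yourself without setting up a server; requests exposes them through requests.utils.default_headers(). A minimal sketch:

import requests

# The headers requests attaches when you do not override them -- note the
# tell-tale "python-requests/x.y.z" User-Agent string.
print(requests.utils.default_headers())

It is precisely that python-requests User-Agent that a server can key on.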

Anti-scraping (a Flask example):


from flask import Flask, request

app = Flask(__name__)


@app.route('/getInfo')
def hello_world():
    # Reject requests whose User-Agent starts with "python" (the requests default)
    if str(request.headers.get('User-Agent')).startswith('python'):
        return "Hey kid, using a crawler, are you? Get lost."
    else:
        return "Pretend there is a lot of data here."


if __name__ == "__main__":
    app.run(debug=True)
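The usual counter on the client side is to send a browser-like User-Agent instead of the default. A minimal sketch against the local Flask demo above (the User-Agent string is the same one used in the proxy-pool script further down):

import requests

headers = {
    # Pretend to be a regular desktop browser
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
}

rsp = requests.get('http://127.0.0.1:5000/getInfo', headers=headers)
print(rsp.text)  # now hits the "data" branch instead of the rejection

Spoofing the User-Agent only defeats this particular check; sites that throttle or ban by IP address still call for the proxy approach below.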

Notes:

There are plenty of free proxies online (a quick search turns up piles of them), but they tend to be unstable.

A better option is to build an IP proxy pool.

IP proxy pool

Building your own proxy pool:

Crawling the free proxy IPs published on the Xici (xicidaili) proxy site:

Tutorial link:
https://www.jianshu.com/p/2daa34a435df

from bs4 import BeautifulSoup
import requests
from urllib import request, error
import threading

# Both files are assumed to exist already; create empty proxy.txt and verified.txt first
inFile = open('proxy.txt')
verifiedtxt = open('verified.txt')
lock = threading.Lock()


def getProxy(url):
    # Open the txt file we created for storing proxies
    proxyFile = open('proxy.txt', 'a')
    # Set a browser User-Agent
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
    }
    # page is how many listing pages to fetch; here we go up to page 9
    for page in range(1, 10):
        # The listing URL is simply the base URL plus the page number (converted to str)
        urls = url + str(page)
        # Fetch the page source with requests
        rsp = requests.get(urls, headers=headers)
        html = rsp.text
        # Parse the HTML with BeautifulSoup (naming the parser avoids a warning)
        soup = BeautifulSoup(html, 'html.parser')
        # The data lives in the tr tags of the table with id="ip_list"
        trs = soup.find('table', id='ip_list').find_all('tr')  # this returns a list
        # Loop over the rows, skipping the header row
        for item in trs[1:]:
            # Extract all td tags inside the tr
            tds = item.find_all('td')
            # Some rows have an empty img tag, so guard against that
            if tds[0].find('img') is None:
                nation = 'unknown'
                locate = 'unknown'
            else:
                nation = tds[0].find('img')['alt'].strip()
                locate = tds[3].text.strip()
            # Pull the individual fields out of the td list
            ip = tds[1].text.strip()
            port = tds[2].text.strip()
            anony = tds[4].text.strip()
            protocol = tds[5].text.strip()
            speed = tds[6].find('div')['title'].strip()
            time = tds[8].text.strip()
            # Write the record in a fixed format so it is easy to read back later
            proxyFile.write('%s|%s|%s|%s|%s|%s|%s|%s\n' % (nation, ip, port, locate, anony, protocol, speed, time))
    proxyFile.close()


def verifyProxyList():
    verifiedFile = open('verified.txt', 'a')

    while True:
        # Reads from the shared file handle must be serialized across threads
        lock.acquire()
        ll = inFile.readline().strip()
        lock.release()
        if len(ll) == 0: break
        # Each line has the format nation|ip|port|locate|anony|protocol|speed|time
        line = ll.strip().split('|')
        ip = line[1]
        port = line[2]
        realip = ip + ':' + port
        code = verifyProxy(realip)
        if code == 200:
            lock.acquire()
            print("---Success:" + ip + ":" + port)
            verifiedFile.write(ll + "\n")
            lock.release()
        else:
            print("---Failure:" + ip + ":" + port)

def verifyProxy(ip):
    '''
    Verify that a proxy actually works by requesting Baidu through it.
    '''
    requestHeader = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36"
    }
    url = "http://www.baidu.com"
    # Fill in the proxy address
    proxy = {'http': ip}
    # Create the ProxyHandler
    proxy_handler = request.ProxyHandler(proxy)
    # Create the opener
    proxy_opener = request.build_opener(proxy_handler)
    # Install the opener
    request.install_opener(proxy_opener)

    try:
        req = request.Request(url, headers=requestHeader)
        rsq = request.urlopen(req, timeout=5.0)
        code = rsq.getcode()
        return code
    except error.URLError:
        # Any error means the proxy is unusable; return a value that is never 200
        return -1


if __name__ == '__main__':
    # Truncate both output files so each run starts fresh
    tmp = open('proxy.txt', 'w')
    tmp.write("")
    tmp.close()
    tmp1 = open('verified.txt', 'w')
    tmp1.write("")
    tmp1.close()
    getProxy("http://www.xicidaili.com/nn/")
    getProxy("http://www.xicidaili.com/nt/")
    getProxy("http://www.xicidaili.com/wn/")
    getProxy("http://www.xicidaili.com/wt/")

    # Spin up 30 threads to verify the collected proxies in parallel
    all_thread = []
    for i in range(30):
        t = threading.Thread(target=verifyProxyList)
        all_thread.append(t)
        t.start()

    for t in all_thread:
        t.join()

    inFile.close()
    verifiedtxt.close()
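Once verified.txt has entries, any of the verified proxies can be plugged into requests through its proxies parameter. A minimal sketch, assuming the file format written by the script above (taking the first entry is just for illustration):

import requests

# Each line is: nation|ip|port|locate|anony|protocol|speed|time
with open('verified.txt') as f:
    fields = f.readline().strip().split('|')
ip, port = fields[1], fields[2]

proxies = {
    'http': 'http://%s:%s' % (ip, port),
    'https': 'http://%s:%s' % (ip, port),
}

# The request now goes out through the proxy instead of your own IP
rsp = requests.get('http://www.baidu.com', proxies=proxies, timeout=5)
print(rsp.status_code)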

 

Follow-up improvements:

A solid open-source IP proxy pool:

https://github.com/Python3WebSpider/ProxyPool.git

Following the instructions in that GitHub repo, you first need a working Redis installation:
Tutorial:
https://www.runoob.com/redis/redis-install.html

Then put the proxy pool's files in a directory of your choice and cd into that directory from the command line.

Then start the IP proxy pool:

from proxypool.scheduler import Scheduler
import argparse


parser = argparse.ArgumentParser(description='ProxyPool')
parser.add_argument('--processor', type=str, help='processor to run')
args = parser.parse_args()

if __name__ == '__main__':
    # if processor set, just run it
    if args.processor:
        getattr(Scheduler(), f'run_{args.processor}')()
    else:
        Scheduler().run()
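With the pool running, its HTTP API hands out usable proxies. A minimal sketch, assuming the default API address http://localhost:5555 and the /random endpoint described in the repo's README (adjust if you changed the configuration):

import requests

PROXY_POOL_URL = 'http://localhost:5555/random'


def get_proxy():
    # /random returns one usable proxy as plain text, e.g. "1.2.3.4:8080"
    rsp = requests.get(PROXY_POOL_URL)
    if rsp.status_code == 200:
        return rsp.text.strip()
    return None


proxy = get_proxy()
if proxy:
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
    print(requests.get('http://www.baidu.com', proxies=proxies, timeout=5).status_code)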

 
