When I scrape a target site with the Scrapy framework, the volume of requests is large enough that the site often blocks my host's IP. Routing requests through proxy IPs works around this ban; for how to configure the pool inside Scrapy itself, see the companion post on setting up an IP pool in Scrapy (scrapy設置IP池). Here I only demonstrate fetching and verifying the proxy IPs.
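For context, here is a minimal sketch of what the Scrapy side might look like: a hypothetical RandomProxyMiddleware that draws from the proxies.txt file produced by the script below. The class name, module path, and priority number are illustrative, not from the companion post; only request.meta['proxy'] is the standard Scrapy hook.

# settings.py (sketch): register the hypothetical middleware
# DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.RandomProxyMiddleware': 543}

import random

class RandomProxyMiddleware(object):
    """Assign a random proxy from proxies.txt to every outgoing request."""

    def __init__(self):
        with open('proxies.txt') as f:
            self.proxies = [line.strip() for line in f if line.strip()]

    def process_request(self, request, spider):
        # request.meta['proxy'] is how Scrapy's downloader picks up a proxy URL
        request.meta['proxy'] = random.choice(self.proxies)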
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@file: Proxies.py
@time: 2018/3/13 14:09
@describe: Scrape proxy IPs from the xici site
http://blog.csdn.net/u011781521/article/details/70194744?locationNum=4&fps=1
"""
from bs4 import BeautifulSoup
from multiprocessing import Process, Queue
import random
import requests


class Proxies(object):
    """Scrape and verify proxy IPs from www.xicidaili.com."""

    def __init__(self, page=3):
        self.proxies = []
        self.verify_pro = []
        self.page = page
        self.headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        }
        self.get_proxies()
        self.get_proxies_nn()

    def get_proxies(self):
        # Scrape self.page pages of the "nt" (ordinary) proxy listings,
        # starting from a random page to vary the harvest between runs.
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nt/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            # xici marks alternating table rows with class "odd";
            # td[1] is the IP, td[2] the port, td[5] the protocol (HTTP/HTTPS)
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def get_proxies_nn(self):
        # Same as get_proxies(), but for the "nn" (high-anonymity) listings
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nn/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def verify_proxies(self):
        # Queue of unverified proxies
        old_queue = Queue()
        # Queue of proxies that passed verification
        new_queue = Queue()
        print('verify proxy........')
        works = []
        for _ in range(15):
            works.append(Process(target=self.verify_one_proxy, args=(old_queue, new_queue)))
        for work in works:
            work.start()
        for proxy in self.proxies:
            old_queue.put(proxy)
        for work in works:
            old_queue.put(0)  # sentinel: one per worker, tells it to exit
        for work in works:
            work.join()
        self.proxies = []
        while 1:
            try:
                self.proxies.append(new_queue.get(timeout=1))
            except Exception:
                break
        print('verify_proxies done!')

    def verify_one_proxy(self, old_queue, new_queue):
        while 1:
            proxy = old_queue.get()
            if proxy == 0:
                break
            protocol = 'https' if 'https' in proxy else 'http'
            proxies = {protocol: proxy}
            try:
                if requests.get('http://www.baidu.com', proxies=proxies, timeout=2).status_code == 200:
                    print('success %s' % proxy)
                    new_queue.put(proxy)
            except Exception:
                print('fail %s' % proxy)


if __name__ == '__main__':
    a = Proxies()
    a.verify_proxies()
    print(a.proxies)
    proxie = a.proxies
    with open('proxies.txt', 'a') as f:
        for proxy in proxie:
            f.write(proxy + '\n')
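To make the proxies dict concrete: requests expects a mapping from URL scheme to proxy URL, which is exactly what verify_one_proxy() builds. A standalone example (the proxy address here is a made-up placeholder):

import requests

# Scheme-to-URL mapping, as built in verify_one_proxy(); the address is a placeholder
proxies = {'http': 'http://123.45.67.89:8080'}
resp = requests.get('http://www.baidu.com', proxies=proxies, timeout=2)
print(resp.status_code)  # 200 means the proxy relayed the request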
The IPs harvested above are of fairly low quality. To raise the quality, the script below verifies each scraped proxy a second time: this takes noticeably longer, but the IPs that survive the second screening are much more reliable and can be used for the final IP pool.
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@file: ceshiIp.py
@time: 2018/3/13 14:17
@describe: Re-verify the scraped proxy IPs; keep the ones that pass, discard the rest.
To check a proxy manually, run "telnet 127.0.0.1 3306" from the cmd prompt.
"""
import telnetlib

# proxies.txt lines look like "http://1.2.3.4:8080"
with open('proxies.txt') as f, open('IP-OK.txt', 'w') as g:
    for line in f:
        line = line.strip()
        str_index_1 = line.find('//')
        str_index_2 = line.rfind(':')
        str_id = line[str_index_1 + 2:str_index_2]   # host part
        str_port = line[str_index_2 + 1:]            # port part
        try:
            # A successful TCP connection within 20s counts as "proxy alive"
            telnetlib.Telnet(host=str_id, port=int(str_port), timeout=20)
        except Exception:
            print('connect failed', str_id + ':' + str_port)
        else:
            print('success', str_id + ':' + str_port)
            g.write(str_id + ':' + str_port + '\n')
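Note that telnetlib is only used here to open a raw TCP connection, and it was removed from the standard library in Python 3.13. On newer interpreters the same liveness check can be done with the socket module; a drop-in sketch under that assumption:

import socket

def tcp_alive(host, port, timeout=20):
    """Return True if a TCP connection to host:port succeeds within timeout."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:  # covers refusals, unreachable hosts, and timeouts
        return False

# The same check the verification loop performs, without telnetlib
print(tcp_alive('127.0.0.1', 3306))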