Meme Image Crawler

1. Synchronous crawler



import requests
from lxml import etree
from urllib import request
import os
import re
import time


def get_page_source(link):
    headers = {
        'Referer': 'http://www.doutula.com/photo/list/?page=23',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
        'Cookie': '__cfduid=d74bb1bdede33ae5fa88970198604232f1570874777; XSRF-TOKEN=eyJpdiI6IjE3ZUNSS1VJWXp2MzRENEhOdmlPSXc9PSIsInZhbHVlIjoiUFBVM25OSVBhZDRsSEhheGhGaVpLdFgyWU1TUmdoUGY2TFFxQ0ZkQUZvNjBONW94MmtmdDVHTEZ0TmMzWW5GNyIsIm1hYyI6IjY4NTQ5Yjk0MDVlOGViMWI1NTA4YWYyODI1N2NhNGJhMWFjMWQwMjI5NTEyMGQ2NTlmYWUzNGI4ZmVhMzkzNjQifQ%3D%3D; doutula_session=eyJpdiI6ImxmeFwvcDR1UVR0OTcrOVFPbnM4eCtnPT0iLCJ2YWx1ZSI6IjV4c3liSTF2VUtBellnbHJhNWxjWWk1QmZnRllRR0wwYnRvZjFzeTNjMFJkWEZlcWZiTlA4aEVXRUh6OWZKV3giLCJtYWMiOiJiZGU2ZTFkOTFhMTkyNjFkYmUwMTU1MGFiMWY0MDgxNWQ3MzQ4MDBmNmE4NjEyMzc1ODFjMDRjYmM2NGYxYjk0In0%3D; UM_distinctid=16dbf6ee8e4417-0d482538bfe3688-14377a40-144000-16dbf6ee8e652a; CNZZDATA1256911977=144637179-1570873422-%7C1570878822; Hm_lvt_24b7d5cc1b26f24f256b6869b069278e=1570881136; Hm_lpvt_24b7d5cc1b26f24f256b6869b069278e=1570881170'
    }
    resp = requests.get(link, headers=headers)
    html = etree.HTML(resp.text)
    imgs = html.xpath("//div[@class='page-content text-center']//a//img[@class!='gif']")
    for img in imgs:
        img_url = img.get("data-original")  # image URL
        alt = img.get("alt")  # image name
        alt = re.sub(r'[*?。,?,\.,!!]', "", alt)  # strip characters that are invalid in filenames
        suffix = os.path.splitext(img_url)[-1]  # file extension
        filename = alt + suffix
        request.urlretrieve(img_url, 'images/' + filename)  # save the image


def main():
    os.makedirs('images', exist_ok=True)  # make sure the output directory exists
    for i in range(1, 20):  # pages 1-19
        url = 'http://www.doutula.com/photo/list/?page={}'.format(i)
        get_page_source(url)


if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print(end - start)  # elapsed seconds

2. Asynchronous mode (multithreading)

Reference on multithreading usage: https://www.cnblogs.com/Eva-J/articles/8306047.html
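The code below follows the classic producer/consumer pattern: Get_Link threads parse the list pages and push image links into a shared, thread-safe Queue, while Download_Image threads pull links off that queue and save the files. As a warm-up, here is a minimal standalone sketch of that pattern (the function names and the fake "links" are only illustrative, not part of the crawler):

import threading
from queue import Queue, Empty


def collect_links(page_queue, image_queue):
    # Producer: claim page numbers and hand the "links" found on them to the consumers.
    while True:
        try:
            page = page_queue.get_nowait()  # non-blocking get avoids hanging on an empty queue
        except Empty:
            return
        image_queue.put('link-found-on-page-{}'.format(page))


def download_links(image_queue):
    # Consumer: drain the shared queue; Queue does all the locking internally.
    while True:
        try:
            link = image_queue.get(timeout=1)  # give up after 1 s without new work
        except Empty:
            return
        print('downloading', link)


page_queue = Queue()
image_queue = Queue()
for page in range(1, 6):
    page_queue.put(page)

workers = [threading.Thread(target=collect_links, args=(page_queue, image_queue)) for _ in range(2)]
workers += [threading.Thread(target=download_links, args=(image_queue,)) for _ in range(2)]
for t in workers:
    t.start()
for t in workers:
    t.join()

Using get_nowait() sidesteps the small race where two threads both see a non-empty queue and one of them then blocks forever on get(); the crawler below uses the simpler empty()-then-get() check, which is usually fine for this workload.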


import re
import time
from lxml import etree
import requests
import os
from urllib import request
from queue import Queue
import threading

'''
Queue is thread-safe, so the link-collecting threads and the download threads can share it without extra locking.
'''


class Get_Link(threading.Thread):
    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self):
        while True:
            if self.page_queue.empty():  # empty means every page URL has been claimed
                break
            url = self.page_queue.get()  # take one page URL
            self.get_link(url)  # parse the page and put every image link on the image queue

    def get_link(self, url):
        headers = {
            'Referer': 'http://www.doutula.com/photo/list/?page=23',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
            'Cookie': '__cfduid=d74bb1bdede33ae5fa88970198604232f1570874777; XSRF-TOKEN=eyJpdiI6IjE3ZUNSS1VJWXp2MzRENEhOdmlPSXc9PSIsInZhbHVlIjoiUFBVM25OSVBhZDRsSEhheGhGaVpLdFgyWU1TUmdoUGY2TFFxQ0ZkQUZvNjBONW94MmtmdDVHTEZ0TmMzWW5GNyIsIm1hYyI6IjY4NTQ5Yjk0MDVlOGViMWI1NTA4YWYyODI1N2NhNGJhMWFjMWQwMjI5NTEyMGQ2NTlmYWUzNGI4ZmVhMzkzNjQifQ%3D%3D; doutula_session=eyJpdiI6ImxmeFwvcDR1UVR0OTcrOVFPbnM4eCtnPT0iLCJ2YWx1ZSI6IjV4c3liSTF2VUtBellnbHJhNWxjWWk1QmZnRllRR0wwYnRvZjFzeTNjMFJkWEZlcWZiTlA4aEVXRUh6OWZKV3giLCJtYWMiOiJiZGU2ZTFkOTFhMTkyNjFkYmUwMTU1MGFiMWY0MDgxNWQ3MzQ4MDBmNmE4NjEyMzc1ODFjMDRjYmM2NGYxYjk0In0%3D; UM_distinctid=16dbf6ee8e4417-0d482538bfe3688-14377a40-144000-16dbf6ee8e652a; CNZZDATA1256911977=144637179-1570873422-%7C1570878822; Hm_lvt_24b7d5cc1b26f24f256b6869b069278e=1570881136; Hm_lpvt_24b7d5cc1b26f24f256b6869b069278e=1570881170'
        }
        resp = requests.get(url, headers=headers)

        html = etree.HTML(resp.text)  # parse with XPath
        imgs = html.xpath("//div[@class='page-content text-center']//a//img[@class!='gif']")
        for img in imgs:
            img_url = img.get("data-original")  # image URL
            alt = img.get("alt")  # image name
            alt = re.sub(r'[*?。,?,\.,!!\/]', "", alt)  # strip characters that are invalid in filenames
            suffix = os.path.splitext(img_url)[-1]  # file extension
            filename = alt + suffix  # file name
            self.image_queue.put((img_url, filename))  # enqueue as a (url, filename) tuple


class Download_Image(threading.Thread):
    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self):
        # download images until both queues are drained
        start = time.time()
        while True:
            if self.image_queue.empty():
                if self.page_queue.empty():
                    end = time.time()
                    print(end - start)  # running time of this thread
                    return
            img_url, filename = self.image_queue.get()  # (url, filename) tuple from the queue
            request.urlretrieve(img_url, 'images2/' + filename)  # download
            print('over')


def main():
    os.makedirs('images2', exist_ok=True)  # make sure the output directory exists
    page_queue = Queue(100)  # queue of page URLs
    image_queue = Queue(500)  # queue of (image URL, filename) tuples
    for i in range(21, 40):  # crawl pages 21-39
        url = 'http://www.doutula.com/photo/list/?page={}'.format(i)
        page_queue.put(url)
    for x in range(5):  # 5 producer threads that collect image links
        t = Get_Link(page_queue, image_queue)
        t.start()
    for x in range(5):  # 5 download threads
        t = Download_Image(page_queue, image_queue)
        t.start()


if __name__ == '__main__':
    main()

The synchronous version runs in about 120 s, while the threaded version with 5 link-collecting threads and 5 download threads finishes in about 44 s, so the concurrent approach saves a large amount of time.
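The ~44 s figure is printed by each download thread once it sees both queues empty. If a single end-to-end number is wanted instead, one option (a sketch built on the classes above, not part of the original code) is to keep the thread objects and join them before reading the clock:

def timed_main():
    # Hypothetical variant of main(): same setup, but measure the whole run in one place.
    os.makedirs('images2', exist_ok=True)
    page_queue = Queue(100)
    image_queue = Queue(500)
    for i in range(21, 40):
        page_queue.put('http://www.doutula.com/photo/list/?page={}'.format(i))

    start = time.time()
    workers = [Get_Link(page_queue, image_queue) for _ in range(5)]
    workers += [Download_Image(page_queue, image_queue) for _ in range(5)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()  # assumes every run() eventually returns, i.e. both queues get drained
    print('total seconds:', time.time() - start)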
