Scrapy usage tips in Python

Scrapy file structure

(Figure: Scrapy project directory structure)
Usage notes are written as comments inside the code below. Enjoy, and if you find it useful, feel free to give it a like 🙂

sample_spider code

"""Spider類基礎屬性和方法
屬性:                       含義:
name                       爬蟲名稱,它必須是唯一的,用來啓動爬蟲
allowed_domains            允許爬取的域名,是可選配置
start_urls                 起始URL列表,當沒有重寫start_requests()方法時,默認使用這個列表
custom_settings            它是一個字典,專屬與本Spider的配置,此設置會覆蓋項目全局的設置,必須在初始化前被更新,必須定義成類變量
spider                    它是由from_crawler()方法設置的,代表本Spider類對應的Crawler對象,可以獲取項目的全局配置信息
settings                   它是一個Settings對象,我們可以直接獲取項目的全局設置變量

方法:                      含義:
start_requests()           生成初始請求,必須返回一個可迭代對象,默認使用start_urls裏的URL和GET請求,如需使用POST需要重寫此方法
parse()                    當Response沒有指定回調函數時,該方法會默認被調用,該函數必須要返回一個包含Request或Item的可迭代對象
closed()                   當Spider關閉時,該方法會被調用,可以在這裏定義釋放資源的一些操作或其他收尾操作

Request屬性:
meta                       可以利用Request請求傳入參數,在Response中可以取值,是一個字典類型
cookies                    可以傳入cookies信息,是一個字典類型
dont_filter                如果使用POST,需要多次提交表單,且URL一樣,那麼就必須設置爲True,防止被當成重複網頁過濾掉
"""
# -*- coding: utf-8 -*-
import scrapy

from ..items import ExampleItem
from scrapy.http import Request, FormRequest
from scrapy import Selector

__author__ = 'Evan'


class SampleSpider(scrapy.Spider):
    name = 'sample_spider'  # spider name; must be unique within the project
    allowed_domains = ['quotes.toscrape.com']  # allowed domains
    start_urls = ['http://quotes.toscrape.com/']  # start URLs

    """更改初始請求,必須返回一個可迭代對象
    def start_requests(self):
        return [Request(url=self.start_urls[0], callback=self.parse)]
        or
        yield Request(url=self.start_urls[0], callback=self.parse)
    """

    def parse(self, response):
        """
        當Response沒有指定回調函數時,該方法會默認被調用
        :param response: From the start_requests() function
        :return: 該函數必須要返回一個包含 Request 或 Item 的可迭代對象
        """
        # TODO Request attributes
        # print(response.request.url)  # URL of the Request
        # print(response.request.headers)  # headers of the Request
        # print(response.request.headers.getlist('Cookie'))  # cookies of the Request

        # TODO Response attributes
        # print(response.text)  # HTML of the Response as text
        # print(response.body)  # HTML of the Response as bytes
        # print(response.url)  # URL of the Response
        # print(response.headers)  # headers of the Response
        # print(response.headers.getlist('Set-Cookie'))  # cookies of the Response
        # json.loads(response.text)  # parse an AJAX (JSON) response into a dict

        # TODO Using the Selector class
        # selector = Selector(response=response)  # initialize from a Response
        # selector = Selector(text=div)  # initialize from an HTML string
        # selector.xpath('//a/text()').extract()  # parse with an XPath selector, returns a list
        # selector.xpath('//a/text()').re(r'Name:\s(.*)')  # XPath selector plus regex, returns the list of matched groups
        # selector.xpath('//a/text()').re_first(r'Name:\s(.*)')  # XPath selector plus regex, returns the first match

        # TODO Read the project-wide configuration from settings.py
        # print(self.settings.get('USER_AGENT'))

        quotes = response.css('.quote')  # CSS selector, returns a SelectorList
        for quote in quotes:
            # ::text  extracts the text
            # ::attr(src)  extracts the value of the src attribute
            item = ExampleItem()  # build a fresh item for every quote
            item['text'] = quote.css('.text::text').extract_first()  # first matched result
            item['author'] = quote.css('.author::text').extract_first()
            item['tags'] = quote.css('.tags .tag::text').extract()  # list of all matched results
            yield item

        next_url = response.css('.pager .next a::attr(href)').extract_first()  # URL of the next page, None on the last page
        if next_url:
            url = response.urljoin(next_url)  # build an absolute URL
            yield Request(url=url, callback=self.parse)  # set the callback so every page is crawled in turn
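
The start_requests(), meta, cookies and dont_filter notes above can be made concrete with a minimal sketch. The login URL, form fields and cookie value below are placeholders invented for illustration, not a documented part of quotes.toscrape.com:

# Hypothetical sketch: a Spider that submits a POST form from start_requests().
# All URLs, field names and values are placeholders.
import scrapy
from scrapy.http import FormRequest


class LoginSpider(scrapy.Spider):
    name = 'login_spider'

    def start_requests(self):
        yield FormRequest(
            url='http://quotes.toscrape.com/login',  # placeholder login URL
            formdata={'username': 'user', 'password': 'pass'},  # placeholder form fields
            cookies={'session': 'placeholder'},  # cookies are passed as a dict
            meta={'page': 1},  # read back later via response.meta['page']
            dont_filter=True,  # keep repeated POSTs to the same URL
            callback=self.after_login,
        )

    def after_login(self, response):
        self.logger.info('Logged in, page=%s', response.meta['page'])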

items code

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ExampleItem(scrapy.Item):
    """
    定義數據結構
    """
    text = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()
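
For reference, an Item instance behaves like a dict restricted to its declared fields; a quick sketch (assuming the project package is named example, as in the settings shown later):

# Quick sketch: Items behave like dicts limited to their declared Fields.
from example.items import ExampleItem

item = ExampleItem(text='A quote', author='Somebody')
item['tags'] = ['life', 'humor']
print(dict(item))  # {'text': 'A quote', 'author': 'Somebody', 'tags': ['life', 'humor']}
# item['year'] = 2020  # would raise KeyError because 'year' is not a declared Field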

middlewares code

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random
from scrapy import signals


class RandomUserAgentMiddleware(object):
    """
    自定義類
    """
    def __init__(self):
        self.user_agents = [
            # Chrome UA
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
            ' Chrome/73.0.3683.75 Safari/537.36',
            # IE UA
            'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
            # Microsoft Edge UA
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
            ' Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'
        ]

    def process_request(self, request, spider):
        """
        生成一個隨機請求頭
        :param request:
        :param spider:
        :return:
        """
        request.headers['User-Agent'] = random.choice(self.user_agents)


class ExampleSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spider.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        """
        當 Response 被 Spider MiddleWare 處理時,會調用此方法
        :param response:
        :param spider:
        :return:
        """
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        """
        當 Spider 處理 Response 返回結果時,會調用此方法
        :param response:
        :param result:
        :param spider:
        :return:
        """
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        """
        以 Spider 啓動的 Request 爲參數被調用,執行的過程類似 process_spider_output(),必須返回 Request
        :param start_requests:
        :param spider:
        :return:
        """
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ExampleDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spider.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """
        在發送請求到 Download 之前調用此方法,可以修改User-Agent、處理重定向、設置代理、失敗重試、設置Cookies等功能
        :param request:
        :param spider:
        :return: 如果返回的是一個 Request,會把它放到調度隊列,等待被調度
        """
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        """
        在發送 Response 響應結果到 Spider 解析之前調用此方法,可以修改響應結果
        :param request:
        :param response:
        :param spider:
        :return: 如果返回的是一個 Request,會把它放到調度隊列,等待被調度
        """
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        """
        當 Downloader 或 process_request() 方法拋出異常時,會調用此方法
        :param request:
        :param exception:
        :param spider:
        :return: 如果返回的是一個 Request,會把它放到調度隊列,等待被調度
        """
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
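
The process_request() hook above mentions setting proxies; here is a minimal sketch of a proxy-assigning downloader middleware. RandomProxyMiddleware and the PROXY_LIST setting are assumptions for illustration, not part of the example project; it would be enabled in DOWNLOADER_MIDDLEWARES just like RandomUserAgentMiddleware:

# Hypothetical sketch: a downloader middleware that assigns a random proxy.
# PROXY_LIST is an assumed setting, e.g. PROXY_LIST = ['http://127.0.0.1:8888'] in settings.py.
import random


class RandomProxyMiddleware(object):

    def __init__(self, proxies):
        self.proxies = proxies

    @classmethod
    def from_crawler(cls, crawler):
        return cls(proxies=crawler.settings.getlist('PROXY_LIST'))

    def process_request(self, request, spider):
        if self.proxies:
            request.meta['proxy'] = random.choice(self.proxies)  # the standard Scrapy proxy meta key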

pipelines code

# -*- coding: utf-8 -*-
import pymongo
from scrapy.exceptions import DropItem


class TextPipeline(object):
    """
    自定義類
    """
    def __init__(self):
        self.limit = 50

    def process_item(self, item, spider):
        """
        必須要實現的方法,Pipeline會默認調用此方法
        :param item:
        :param spider:
        :return: 必須返回 Item 類型的值或者拋出一個 DropItem 異常
        """
        if item['text']:
            if len(item['text']) > self.limit:  # 對超過50個字節長度的字符串進行切割
                item['text'] = item['text'][:self.limit].rstrip() + '...'
            return item
        else:
            raise DropItem('Missing Text')  # 如果拋出此異常,會丟棄此Item,不再進行處理


class MongoPipeline(object):
    """
    自定義類
    """
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.client = None
        self.db = None

    @classmethod
    def from_crawler(cls, crawler):
        """
        Pipelines的準備工作,通過crawler可以拿到全局配置的每個配置信息
        :param crawler:
        :return: 類實例
        """
        # 使用類方法,返回帶有MONGO_URI和MONGO_DB值的類實例
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),  # MONGO_URI的值從settings.py獲取
            mongo_db=crawler.settings.get('MONGO_DB')  # MONGO_DB的值從settings.py獲取
        )

    def open_spider(self, spider):
        """
        當 Spider 開啓時,這個方法會被調用
        :param spider:
        :return:
        """
        self.client = pymongo.MongoClient(self.mongo_uri)  # 打開Mongodb連接
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        """
        必須要實現的方法,Pipeline會默認調用此方法
        :param item:
        :param spider:
        :return: 必須返回 Item 類型的值或者拋出一個 DropItem 異常
        """
        name = item.__class__.__name__  # 創建一個集合,name='ExampleItem'
        self.db[name].update_one(item, {"$set": item}, upsert=True)  # 數據去重
        return item

    def close_spider(self, spider):
        """
        當 Spider 關閉時,這個方法會被調用
        :param spider:
        :return:
        """
        self.client.close()  # 關閉Mongodb連接
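
As another illustration of the open_spider/process_item/close_spider lifecycle used above, here is a minimal sketch of a pipeline that appends every item to a JSON Lines file. JsonLinesPipeline and the items.jl filename are assumptions for illustration:

# Hypothetical sketch: write each item as one JSON line, reusing the pipeline lifecycle hooks.
import json


class JsonLinesPipeline(object):

    def open_spider(self, spider):
        self.file = open('items.jl', 'w', encoding='utf-8')  # arbitrary output filename

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item  # hand the item on to the next pipeline

    def close_spider(self, spider):
        self.file.close()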

settings code

# -*- coding: utf-8 -*-

# Scrapy settings for example project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'example'

SPIDER_MODULES = ['example.spiders']
NEWSPIDER_MODULE = 'example.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# TODO Set the default User-Agent header
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/78.0.3904.108 Safari/537.36'

# Obey robots.txt rules
# TODO Do not fetch or obey robots.txt
ROBOTSTXT_OBEY = False

# TODO Set the export encoding
FEED_EXPORT_ENCODING = 'utf-8'  # keeps Chinese text readable in JSON exports
# FEED_EXPORT_ENCODING = 'gb18030'  # use this instead for CSV exports

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# TODO When True, cookies can be passed manually via the cookies parameter of a Request
COOKIES_ENABLED = True

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'example.middlewares.ExampleSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# TODO Enable the downloader middleware that sets a random User-Agent
DOWNLOADER_MIDDLEWARES = {
   'example.middlewares.RandomUserAgentMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# TODO Enable the item pipelines that filter and store the data (the lower the value, the higher the priority: 300 runs before 400)
ITEM_PIPELINES = {
   'example.pipelines.TextPipeline': 300,
   'example.pipelines.MongoPipeline': 400,
}
# TODO MongoDB configuration
MONGO_URI = 'localhost'
MONGO_DB = 'example'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
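
Any of the settings above can also be overridden for a single spider through the custom_settings class attribute described earlier; a minimal sketch (SlowSpider and its values are made up for illustration):

# Hypothetical sketch: per-spider overrides of the project-wide settings.
import scrapy


class SlowSpider(scrapy.Spider):
    name = 'slow_spider'
    custom_settings = {
        'DOWNLOAD_DELAY': 3,  # slow this spider down without touching settings.py
        'CONCURRENT_REQUESTS': 8,
    }

    def parse(self, response):
        pass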

main file code

Once all the Scrapy code above is in place, create a main.py to launch the crawl:

# -*- coding=utf8 -*-
from scrapy import cmdline

# TODO Run the crawl command
cmdline.execute("scrapy crawl sample_spider -o sample.json".split())  # run the spider and write sample.json to the current directory
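
As an alternative sketch (not required by the project), the spider can also be started in-process with CrawlerProcess instead of shelling out through cmdline:

# Alternative sketch: run the spider programmatically instead of via the CLI.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # loads settings.py
process.crawl('sample_spider')  # the spider name defined in SampleSpider.name
process.start()  # blocks until the crawl is finished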