Python: A Deep Dive into the scrapy-redis Source Code

A source-code walkthrough of scrapy-redis

The official scrapy-redis documentation is fairly terse and says little about how the framework actually works, so to fully understand how a distributed crawl runs you have to read the scrapy-redis source code.

connection.py

This module instantiates the Redis connection from the values configured in settings. It is called by the dupefilter and the scheduler; in short, anything that reads from or writes to Redis goes through it. connection.py exposes one very important function (a short usage sketch follows the module source below).
import six
from scrapy.utils.misc import load_object

from . import defaults

# Shortcut maps 'setting name' -> 'parameter name'.
# Mapping from Scrapy setting names to redis-py keyword arguments.
SETTINGS_PARAMS_MAP = {
    'REDIS_URL': 'url',
    'REDIS_HOST': 'host',
    'REDIS_PORT': 'port',
    'REDIS_ENCODING': 'encoding',
}


def get_redis_from_settings(settings):
    # Return a Redis client instance built from the Scrapy settings
    # (assemble the connection parameters, then delegate to get_redis)
    """Returns a redis client instance from given Scrapy settings object.

    This function uses ``get_redis`` to instantiate the client and uses
    ``defaults.REDIS_PARAMS`` global as default values for the parameters. You
    can override them using the ``REDIS_PARAMS`` setting.

    Parameters
    ----------
    settings : Settings
        A scrapy settings object. See the supported settings below.

    Returns
    -------
    server
        Redis client instance.

    Other Parameters
    ----------------
    REDIS_URL : str, optional
        Server connection URL.
    REDIS_HOST : str, optional
        Server host.
    REDIS_PORT : str, optional
        Server port.
    REDIS_ENCODING : str, optional
        Data encoding.
    REDIS_PARAMS : dict, optional
        Additional client parameters.

    """
    # Shallow copy, so mutating params does not also change the shared defaults.REDIS_PARAMS
    params = defaults.REDIS_PARAMS.copy()
    # Overlay any REDIS_PARAMS from settings on top of the defaults
    params.update(settings.getdict('REDIS_PARAMS'))
    # XXX: Deprecate REDIS_* settings.
    # Walk the mapping table and pick up the individual REDIS_* settings
    for source, dest in SETTINGS_PARAMS_MAP.items():
        # A value from settings takes precedence
        val = settings.get(source)
        # If the setting is absent, leave params unchanged
        if val:
            params[dest] = val

    # Allow ``redis_cls`` to be a path to a class.
    if isinstance(params.get('redis_cls'), six.string_types):
        params['redis_cls'] = load_object(params['redis_cls'])

    return get_redis(**params)


# Backwards compatible alias.
from_settings = get_redis_from_settings


def get_redis(**kwargs):
    """Returns a redis client instance.

    Parameters
    ----------
    redis_cls : class, optional
        Defaults to ``redis.StrictRedis``.
    url : str, optional
        If given, ``redis_cls.from_url`` is used to instantiate the class.
    **kwargs
        Extra parameters to be passed to the ``redis_cls`` class.

    Returns
    -------
    server
        Redis client instance.

    """
    # If no redis_cls was given, fall back to the default client class
    redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
    url = kwargs.pop('url', None)  # Check whether a connection URL was supplied
    if url:
        # A connection URL takes precedence over individual host/port parameters
        return redis_cls.from_url(url, **kwargs)
    else:
        # Otherwise connect with keyword arguments (host, port, ...)
        return redis_cls(**kwargs)
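
For reference, this is roughly how the helpers above are used from other scrapy-redis components (a minimal sketch; the Redis URL and the parameter values are only examples):

from scrapy.settings import Settings

from scrapy_redis.connection import get_redis, get_redis_from_settings

# Example settings; 'redis://localhost:6379/0' is a placeholder address.
settings = Settings({
    'REDIS_URL': 'redis://localhost:6379/0',
    'REDIS_PARAMS': {'socket_timeout': 10},
})
server = get_redis_from_settings(settings)  # StrictRedis built from defaults + settings
server.ping()                               # same sanity check the scheduler does on startup

# get_redis can also be called directly with keyword arguments.
other = get_redis(host='localhost', port=6379)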

defaults.py

import redis


# For standalone use.
# Key used by the standalone dupefilter (one key per run, timestamped)
DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'
# Key under which serialized items are stored; %(spider)s is the spider name
PIPELINE_KEY = '%(spider)s:items'
# Default client class used to connect to Redis
REDIS_CLS = redis.StrictRedis
# Character encoding
REDIS_ENCODING = 'utf-8'
# Sane connection defaults.
# Default connection parameters for the Redis client
REDIS_PARAMS = {
    'socket_timeout': 30,
    'socket_connect_timeout': 30,
    'retry_on_timeout': True,
    'encoding': REDIS_ENCODING,
}
# Key of the per-spider request queue
SCHEDULER_QUEUE_KEY = '%(spider)s:requests'
# Queue class; the priority queue determines the order in which requests leave the queue
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# Key of the set that stores request fingerprints for deduplication
SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'
# Class used to compute and check request fingerprints
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Key that holds the start URLs for each spider
START_URLS_KEY = '%(name)s:start_urls'
# Whether the start-URLs key is a Redis set (True) or a list (False)
START_URLS_AS_SET = False
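
All of these defaults can be overridden from a project's settings.py. The following is a minimal configuration sketch using the documented scrapy-redis settings (the Redis address is an example):

# settings.py (sketch): enable the scrapy-redis components and override a few defaults
SCHEDULER = "scrapy_redis.scheduler.Scheduler"               # replace Scrapy's scheduler
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"   # Redis-based deduplication
SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.PriorityQueue"   # or FifoQueue / LifoQueue
SCHEDULER_PERSIST = True                                     # keep queue and dupefilter between runs
ITEM_PIPELINES = {"scrapy_redis.pipelines.RedisPipeline": 300}
REDIS_URL = "redis://localhost:6379/0"                       # example address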

dupefilter.py

How URL deduplication works in the distributed crawler: from the code below you can see that self.server is the Redis client, and a single key is used to insert request fingerprints into one Redis set. That key is the same for every instance of a given spider; Redis is a key-value store, so the same key always reaches the same value. Using the spider name plus ':dupefilter' as the key keeps different spiders apart while letting instances of the same spider on different hosts all write into the same set, and that shared set is the pool against which requests are checked for duplicates (a standalone sketch of this check appears right after the module source below).
import logging
import time

from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint

from . import defaults
from .connection import get_redis_from_settings

logger = logging.getLogger(__name__)

# Deduplication is implemented with a Redis set
# TODO: Rename class to RedisDupeFilter.
class RFPDupeFilter(BaseDupeFilter):
    """Redis-based request duplicates filter.

    This class can also be used with default Scrapy's scheduler.

    """

    logger = logger

    def __init__(self, server, key, debug=False):
        """Initialize the duplicates filter.

        Parameters
        ----------
        server : redis.StrictRedis
            The redis server instance.
        key : str
            Redis key where to store fingerprints.
        debug : bool, optional
            Whether to log filtered requests.

        """
        # server is the Redis client; through it we can reach the queues and sets stored in Redis
        self.server = server
        self.key = key
        self.debug = debug
        self.logdupes = True
    # Classmethod: an alternative constructor that builds an instance from settings
    @classmethod
    def from_settings(cls, settings):
        """Returns an instance from given settings.

        This uses by default the key ``dupefilter:<timestamp>``. When using the
        ``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
        it needs to pass the spider name in the key.

        Parameters
        ----------
        settings : scrapy.settings.Settings

        Returns
        -------
        RFPDupeFilter
            A RFPDupeFilter instance.


        """
        # Get the Redis client instance
        server = get_redis_from_settings(settings)
        # XXX: This creates one-time key. needed to support to use this
        # class as standalone dupefilter with scrapy's default scheduler
        # if scrapy passes spider on open() method this wouldn't be needed
        # TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
        # Key under which the fingerprints will be stored
        key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
        debug = settings.getbool('DUPEFILTER_DEBUG')  # Defaults to False
        # Instantiate this class, forwarding the arguments to __init__
        return cls(server, key=key, debug=debug)

    @classmethod
    def from_crawler(cls, crawler):
        """Returns instance from crawler.

        Parameters
        ----------
        crawler : scrapy.crawler.Crawler

        Returns
        -------
        RFPDupeFilter
            Instance of RFPDupeFilter.

        """
        return cls.from_settings(crawler.settings)

    def request_seen(self, request):
        """Returns True if request was already seen.

        Parameters
        ----------
        request : scrapy.http.Request

        Returns
        -------
        bool

        """
        fp = self.request_fingerprint(request)  # Compute the fingerprint of the request
        # This returns the number of values added, zero if already exists.
        # Add the fingerprint to the Redis set:
        #   self.server - the Redis client
        #   self.key    - the key of the fingerprint set
        #   fp          - the fingerprint itself
        added = self.server.sadd(self.key, fp)
        # sadd returns 0 when the member already exists, so added == 0 means the request was seen before
        return added == 0

    def request_fingerprint(self, request):
        """Returns a fingerprint for a given request.

        Parameters
        ----------
        request : scrapy.http.Request

        Returns
        -------
        str

        """
        return request_fingerprint(request)

    @classmethod
    def from_spider(cls, spider):
        settings = spider.settings
        server = get_redis_from_settings(settings)
        dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
        key = dupefilter_key % {'spider': spider.name}
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(server, key=key, debug=debug)

    def close(self, reason=''):
        # Clear the fingerprint set when the spider closes
        """Delete data on close. Called by Scrapy's scheduler.

        Parameters
        ----------
        reason : str, optional

        """
        self.clear()

    def clear(self):
        """Clears fingerprints data."""
        self.server.delete(self.key)

    # Log filtered (duplicate) requests
    def log(self, request, spider):
        """Logs given request.

        Parameters
        ----------
        request : scrapy.http.Request
        spider : scrapy.spiders.Spider

        """
        if self.debug:
            msg = "Filtered duplicate request: %(request)s"
            self.logger.debug(msg, {'request': request}, extra={'spider': spider})
        elif self.logdupes:
            msg = ("Filtered duplicate request %(request)s"
                   " - no more duplicates will be shown"
                   " (see DUPEFILTER_DEBUG to show all duplicates)")
            self.logger.debug(msg, {'request': request}, extra={'spider': spider})
            self.logdupes = False
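
To watch the sadd-based check in isolation, here is a standalone sketch (not part of scrapy-redis; it assumes a Redis server on localhost and uses a made-up key and fingerprint):

import redis

server = redis.StrictRedis(host='localhost', port=6379)  # assumed local Redis instance
key = 'myspider:dupefilter'                               # example key name

def request_seen(fp):
    # sadd returns 1 for a new member and 0 for an existing one,
    # which is exactly the check RFPDupeFilter.request_seen performs.
    return server.sadd(key, fp) == 0

print(request_seen('0a137b375cc3881a70e186ce2172c8d1'))  # False: first occurrence
print(request_seen('0a137b375cc3881a70e186ce2172c8d1'))  # True: duplicate
server.delete(key)  # same cleanup RFPDupeFilter.clear() performs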

picklecompat.py

This module implements loads and dumps, i.e. a small serializer. Redis cannot store arbitrary Python objects (values can only be strings, lists of strings, sets of strings and hashes, and keys can only be strings), so everything has to be serialized before it is stored. Python's pickle module is used here; it works on both Python 2 and Python 3 (a short round-trip sketch follows the module source below).
"""A pickle wrapper module with protocol=-1 by default."""

try:
    import cPickle as pickle  # PY2
except ImportError:
    import pickle  # PY3

# Deserialize: turn the pickled bytes read from Redis back into a Python object
def loads(s):
    return pickle.loads(s)

# Serialize: turn a Python object into pickled bytes that Redis can store
def dumps(obj):
    return pickle.dumps(obj, protocol=-1)
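
A quick round trip shows what the wrapper buys us: an arbitrary Python object goes in, bytes that Redis can store come out, and loading those bytes restores the original object (a small sketch):

from scrapy_redis import picklecompat

obj = {'url': 'http://example.com', 'priority': 0, 'meta': {'depth': 1}}
raw = picklecompat.dumps(obj)         # bytes, safe to store as a Redis value
assert picklecompat.loads(raw) == obj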

pipelines.py

What the class in pipelines.py does: pipelines.py handles the scraped data for distributed processing. It takes the REDIS_ITEMS_KEY configured in settings as the key, serializes each item and pushes it into the corresponding Redis value (the value is a list, and each item becomes one element of that list). Storing the extracted items this way is mainly so that the data can be post-processed later, outside the crawl (a small consumer sketch follows the module source below).
from scrapy.utils.misc import load_object
from scrapy.utils.serialize import ScrapyJSONEncoder
from twisted.internet.threads import deferToThread

from . import connection, defaults

# Default serializer: encode items as JSON strings
default_serialize = ScrapyJSONEncoder().encode

# Pipeline that serializes scraped items and stores them in Redis
class RedisPipeline(object):
    """Pushes serialized item into a redis list/queue

    Settings
    --------
    REDIS_ITEMS_KEY : str
        Redis key where to store items.
    REDIS_ITEMS_SERIALIZER : str
        Object path to serializer function.

    """

    def __init__(self, server,
                 key=defaults.PIPELINE_KEY,
                 serialize_func=default_serialize):
        """Initialize pipeline.

        Parameters
        ----------
        server : StrictRedis
            Redis client instance.
        key : str
            Redis key where to store items.
        serialize_func : callable
            Items serializer function.

        """
        self.server = server
        self.key = key
        self.serialize = serialize_func
    # Alternative constructor: builds the Redis client and the keyword arguments from settings
    @classmethod
    def from_settings(cls, settings):
        # Create the Redis client
        params = {
            'server': connection.from_settings(settings),
        }
        # If REDIS_ITEMS_KEY is configured, use it instead of the default key
        if settings.get('REDIS_ITEMS_KEY'):
            params['key'] = settings['REDIS_ITEMS_KEY']
        # If a serializer is configured, it takes precedence over the default
        if settings.get('REDIS_ITEMS_SERIALIZER'):
            params['serialize_func'] = load_object(
                settings['REDIS_ITEMS_SERIALIZER']
            )

        return cls(**params)

    @classmethod
    def from_crawler(cls, crawler):
        return cls.from_settings(crawler.settings)
    # Called automatically for every item the spider yields
    def process_item(self, item, spider):
        # Run the actual store in a thread pool, so the next item does not have to wait for the previous one to finish
        return deferToThread(self._process_item, item, spider)
    # The function that actually stores the item
    def _process_item(self, item, spider):
        key = self.item_key(item, spider)  # Build the Redis key for this item
        data = self.serialize(item)        # Serialize the item to a JSON string with the default serializer
        self.server.rpush(key, data)       # Append it to the Redis list; self.server is the Redis client
        return item
    # Compute the Redis key under which items are stored
    def item_key(self, item, spider):
        """Returns redis key based on given spider.

        Override this function to use a different key depending on the item
        and/or spider.

        """
        # The key is derived from the spider name
        return self.key % {'spider': spider.name}
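
Because the items end up in an ordinary Redis list, any separate process can drain and post-process them later. A minimal consumer sketch (the spider name and the connection details are examples):

import json

import redis

server = redis.StrictRedis(host='localhost', port=6379)  # assumed local Redis instance
key = 'myspider:items'  # defaults.PIPELINE_KEY rendered for a spider named 'myspider'

while True:
    # blpop blocks until an item is available and returns a (key, value) pair
    _, data = server.blpop(key)
    item = json.loads(data)  # items were serialized with ScrapyJSONEncoder, i.e. JSON
    print(item)              # replace with real post-processing (database, file, ...)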

queue.py

This is the queue module; the scheduler uses these classes as the container that keeps requests in order. 1. One scheduler is instantiated on each host and is tied to its spider, so in a distributed run there are multiple instances of the same spider, each with its own scheduler, spread across different hosts. 2. Because all of those schedulers use the same kind of container, all of the containers connect to the same Redis server, and they all read and write under the same spider-name-based key, the different spider instances on different hosts share a single request scheduling pool. That is what gives the distributed crawlers unified scheduling.
from scrapy.utils.reqser import request_to_dict, request_from_dict

from . import picklecompat


class Base(object):
    """Per-spider base queue class"""

    def __init__(self, server, spider, key, serializer=None):
        """Initialize per-spider redis queue.

        Parameters
        ----------
        server : StrictRedis
            Redis client instance.
        spider : Spider
            Scrapy spider instance.
        key: str
            Redis key where to put and get messages.
        serializer : object
            Serializer object with ``loads`` and ``dumps`` methods.

        """
        if serializer is None:
            # Backward compatibility.
            # TODO: deprecate pickle.
            serializer = picklecompat
        # If the serializer passed in does not implement a loads method, raise;
        # any serializer used here must provide loads
        if not hasattr(serializer, 'loads'):
            raise TypeError("serializer does not implement 'loads' function: %r"
                            % serializer)
        # Likewise, the serializer must implement a dumps method
        if not hasattr(serializer, 'dumps'):
            raise TypeError("serializer does not implement 'dumps' function: %r"
                            % serializer)
        # Shared state available to every method of the queue subclasses
        self.server = server
        self.spider = spider
        self.key = key % {'spider': spider.name}
        self.serializer = serializer
    # Encode a request into a serialized string
    def _encode_request(self, request):
        """Encode a request object"""
        # Convert the Request into a dict
        obj = request_to_dict(request, self.spider)
        # Serialize the dict and return the resulting string
        return self.serializer.dumps(obj)
    # Decode a previously encoded request back into a Request object
    def _decode_request(self, encoded_request):
        """Decode an request previously encoded"""
        # Rebuild the Request from the dict so it can be handed straight to the downloader
        obj = self.serializer.loads(encoded_request)
        return request_from_dict(obj, self.spider)
    # __len__, push and pop must be overridden by subclasses; the base versions just raise
    def __len__(self):
        """Return the length of the queue"""
        raise NotImplementedError

    def push(self, request):
        """Push a request"""
        raise NotImplementedError

    def pop(self, timeout=0):
        """Pop a request"""
        raise NotImplementedError
    # Delete the key that backs this queue
    def clear(self):
        """Clear queue/stack"""
        self.server.delete(self.key)

# FIFO queue, backed by a Redis list
class FifoQueue(Base):
    """Per-spider FIFO queue"""
    # Length of the queue
    def __len__(self):
        """Return the length of the queue"""
        return self.server.llen(self.key)
    # Push requests onto the head (left end) of the list
    def push(self, request):
        """Push a request"""
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        # timeout defaults to 0, i.e. a non-blocking pop
        if timeout > 0:
            # Blocking pop from the tail of the list
            data = self.server.brpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            # Non-blocking pop from the tail
            data = self.server.rpop(self.key)
        if data:
            # Decode the popped element back into a Request for the downloader
            return self._decode_request(data)

# Priority queue: each request is stored in a sorted set with a score, and pops return the highest-priority request first
class PriorityQueue(Base):
    """Per-spider priority queue abstraction using redis' sorted set"""

    def __len__(self):
        """Return the length of the queue"""
        return self.server.zcard(self.key)

    def push(self, request):
        """Push a request"""
        data = self._encode_request(request)
        score = -request.priority
        # We don't use zadd method as the order of arguments change depending on
        # whether the class is Redis or StrictRedis, and the option of using
        # kwargs only accepts strings, not bytes.
        # The sorted set implements the priority queue; the score is the negated request priority, so a lower score means a higher priority
        self.server.execute_command('ZADD', self.key, score, data)

    def pop(self, timeout=0):
        """
        Pop a request
        timeout not support in this queue class
        """
        # use atomic range/remove using multi/exec
        # pipeline() returns a transaction object on the client,
        # so the range and the removal below run atomically
        pipe = self.server.pipeline()
        pipe.multi()
        # zrange returns the lowest-scored (highest-priority) element;
        # zremrangebyrank removes that same element
        pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
        # Execute both commands; results holds the popped element (as a one-item list)
        # and count is 1 or 0 depending on whether anything was removed
        results, count = pipe.execute()
        if results:  # A non-empty list means an element was popped
            # Decode the first (and only) element
            return self._decode_request(results[0])

# LIFO stack, backed by a Redis list
class LifoQueue(Base):
    """Per-spider LIFO queue."""

    def __len__(self):
        """Return the length of the stack"""
        return self.server.llen(self.key)

    def push(self, request):
        """Push a request"""
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        if timeout > 0:
            data = self.server.blpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            data = self.server.lpop(self.key)

        if data:
            return self._decode_request(data)


# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
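
The effect of the negated priority score is easy to check directly against a sorted set; a standalone sketch with made-up payloads (assumes a local Redis server):

import redis

server = redis.StrictRedis(host='localhost', port=6379)
key = 'myspider:requests'  # example key

# A higher Scrapy priority becomes a more negative score, so it sorts first.
for payload, priority in [(b'req-low', 0), (b'req-high', 10), (b'req-mid', 5)]:
    server.execute_command('ZADD', key, -priority, payload)

print(server.zrange(key, 0, -1))  # [b'req-high', b'req-mid', b'req-low']
server.delete(key)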

scheduler.py

This file rewrites the Scheduler class to replace Scrapy's built-in scrapy.core.scheduler. It works by using a designated Redis instance as the storage medium, so that all crawlers are scheduled together. 1. The scheduler dispatches each spider's requests. When it is initialized it reads the queue and dupefilter classes from settings and configures the keys they use (normally the spider name plus 'requests' or 'dupefilter', so different instances of the same spider share the same data). 2. Whenever a request is about to be scheduled, enqueue_request is called; the scheduler uses the dupefilter to decide whether the URL is a duplicate, and if it is not, pushes it into the queue container (FIFO, LIFO or priority order, configurable in settings). 3. When a request is due, next_request is called; the scheduler pops a request from the queue container and sends it to the spider so crawling can continue.
import importlib

import six
from scrapy.utils.misc import load_object

from . import connection, defaults


# TODO: add SCRAPY_JOB support.
# FIXME
class Scheduler(object):
    """Redis-based scheduler

    Settings
    --------
    SCHEDULER_PERSIST : bool (default: False)
        Whether to persist or clear redis queue.
    SCHEDULER_FLUSH_ON_START : bool (default: False)
        Whether to flush redis queue on start.
    SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
        How many seconds to wait before closing if no message is received.
    SCHEDULER_QUEUE_KEY : str
        Scheduler redis key.
    SCHEDULER_QUEUE_CLASS : str
        Scheduler queue class.
    SCHEDULER_DUPEFILTER_KEY : str
        Scheduler dupefilter redis key.
    SCHEDULER_DUPEFILTER_CLASS : str
        Scheduler dupefilter class.
    SCHEDULER_SERIALIZER : str
        Scheduler serializer.

    """

    def __init__(self, server,
                 persist=False,
                 flush_on_start=False,
                 queue_key=defaults.SCHEDULER_QUEUE_KEY,
                 queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
                 dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
                 dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
                 idle_before_close=0,
                 serializer=None):
        """Initialize scheduler.

        Parameters
        ----------
        server : Redis
            The redis server instance.
        persist : bool
            Whether to flush requests when closing. Default is False.
        flush_on_start : bool
            Whether to flush requests on start. Default is False.
        queue_key : str
            Requests queue key.
        queue_cls : str
            Importable path to the queue class.
        dupefilter_key : str
            Duplicates filter key.
        dupefilter_cls : str
            Importable path to the dupefilter class.
        idle_before_close : int
            Timeout before giving up.

        """
        if idle_before_close < 0:
            raise TypeError("idle_before_close cannot be negative")

        self.server = server
        self.persist = persist
        self.flush_on_start = flush_on_start
        self.queue_key = queue_key
        self.queue_cls = queue_cls
        self.dupefilter_cls = dupefilter_cls
        self.dupefilter_key = dupefilter_key
        self.idle_before_close = idle_before_close
        self.serializer = serializer
        self.stats = None

    def __len__(self):
        return len(self.queue)

    @classmethod
    def from_settings(cls, settings):  # Entry point for building the scheduler from settings
        kwargs = {
            # Whether to keep the queue and dupefilter in Redis when the spider closes
            'persist': settings.getbool('SCHEDULER_PERSIST'),
            # Whether to flush the queue when the spider starts
            'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
            'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
        }

        # If these values are missing, it means we want to use the defaults.
        optional = {
            # TODO: Use custom prefixes for this settings to note that are
            # specific to scrapy-redis.
            'queue_key': 'SCHEDULER_QUEUE_KEY',
            'queue_cls': 'SCHEDULER_QUEUE_CLASS',  # Defaults to the priority queue
            'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',  # Deduplication key
            # We use the default setting name to keep compatibility.
            'dupefilter_cls': 'DUPEFILTER_CLASS',
            'serializer': 'SCHEDULER_SERIALIZER',
        }
        for name, setting_name in optional.items():
            val = settings.get(setting_name)
            if val:
                kwargs[name] = val

        # Support serializer as a path to a module.
        if isinstance(kwargs.get('serializer'), six.string_types):
            kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
        # The Redis client instance
        server = connection.from_settings(settings)
        # Ensure the connection is working.
        server.ping()

        return cls(server=server, **kwargs)

    @classmethod
    def from_crawler(cls, crawler):
        instance = cls.from_settings(crawler.settings)
        # FIXME: for now, stats are only supported from this constructor
        instance.stats = crawler.stats
        return instance

    def open(self, spider):
        self.spider = spider

        try:
            self.queue = load_object(self.queue_cls)(
                server=self.server,
                spider=spider,
                key=self.queue_key % {'spider': spider.name},
                serializer=self.serializer,
            )
        except TypeError as e:
            raise ValueError("Failed to instantiate queue class '%s': %s"
                             % (self.queue_cls, e))

        self.df = load_object(self.dupefilter_cls).from_spider(spider)

        if self.flush_on_start:
            self.flush()
        # notice if there are requests already in the queue to resume the crawl
        if len(self.queue):
            spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))

    def close(self, reason):
        if not self.persist:
            self.flush()

    def flush(self):
        self.df.clear()
        self.queue.clear()
    # Enqueue a request (called by the engine when scheduling)
    def enqueue_request(self, request):
        # self.df.request_seen(request) returns True when the request's fingerprint is already in Redis.
        # request.dont_filter is False by default, so filtering is enabled.
        # If filtering is enabled and the request was already seen, drop it and return False.
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        # stats is None unless the scheduler was built through from_crawler
        if self.stats:
            self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
        self.queue.push(request)
        return True
    # Dequeue the next request (called by the engine)
    def next_request(self):
        block_pop_timeout = self.idle_before_close
        # Pop one request from the queue
        request = self.queue.pop(block_pop_timeout)
        if request and self.stats:
            self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
        # Return the request to the engine, which passes it to the downloader to fetch the page
        return request

    def has_pending_requests(self):
        return len(self) > 0

spiders.py

spiders.py is the entry point of the distributed crawler: 1. On initialization the spider calls setup_redis(), which uses the connection module to establish the Redis connection. 2. next_requests pulls start URLs out of Redis; from a small number of start URLs plus a LinkExtractor the spider can discover many new URLs, which go to the scheduler for deduplication and scheduling. Once the spider has drained the scheduling pool, the spider_idle signal fires, which in turn calls the spider's next_requests again. 3. Another batch of URLs is then read from the start-URL pool in Redis. (A minimal spider sketch follows the module source below.)
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider

from . import connection, defaults
from .utils import bytes_to_str


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    redis_key = None  # Key in Redis that holds the start URLs
    redis_batch_size = None  # How many start URLs to fetch per batch
    redis_encoding = None  # Encoding used to decode messages read from Redis

    # Redis client placeholder.
    server = None
    # start_requests is overridden to read the initial batch of requests from Redis
    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
            )

        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            # TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE',
                settings.getint('CONCURRENT_REQUESTS'),
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        if self.redis_encoding is None:
            self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s)",
                         self.__dict__)
        # The Redis client instance
        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        # The start-URLs key is a list by default; it is a set when REDIS_START_URLS_AS_SET is True
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        # Pick the operation used to pop start URLs out of Redis:
        #   use_set=False -> self.server.lpop (list)
        #   use_set=True  -> self.server.spop (set)
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        while found < self.redis_batch_size:
            # Pop one start URL (as bytes) from Redis
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            # The popped value is bytes; it is decoded to str (Python 3 compatibility) before the Request is built
            req = self.make_request_from_data(data)
            if req:
                yield req  # Hand the request over to be scheduled
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
    # Build a Request instance from the raw data read from Redis
    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.

        By default, ``data`` is an encoded URL. You can override this method to
        provide your own message decoding.

        Parameters
        ----------
        data : bytes
            Message from redis.

        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle.

    Attributes
    ----------
    redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key where to fetch start URLs from.
    redis_batch_size : int (default: CONCURRENT_REQUESTS)
        Number of messages to fetch from redis on each attempt.
    redis_encoding : str (default: REDIS_ENCODING)
        Encoding to use when decoding messages from redis queue.

    Settings
    --------
    REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key where to fetch start URLs from.
    REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
        Default number of messages to fetch from redis on each attempt.
    REDIS_START_URLS_AS_SET : bool (default: False)
        Use SET operations to retrieve messages from the redis queue. If False,
        the messages are retrieved using the LPOP command.
    REDIS_ENCODING : str (default: "utf-8")
        Default encoding to use when decoding messages from redis queue.

    """

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle.

    Attributes
    ----------
    redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key where to fetch start URLs from.
    redis_batch_size : int (default: CONCURRENT_REQUESTS)
        Number of messages to fetch from redis on each attempt.
    redis_encoding : str (default: REDIS_ENCODING)
        Encoding to use when decoding messages from redis queue.

    Settings
    --------
    REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key where to fetch start URLs from.
    REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
        Default number of messages to fetch from redis on each attempt.
    REDIS_START_URLS_AS_SET : bool (default: True)
        Use SET operations to retrieve messages from the redis queue.
    REDIS_ENCODING : str (default: "utf-8")
        Default encoding to use when decoding messages from redis queue.

    """

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
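
Using the mixin only takes a spider that sets redis_key, plus pushing some start URLs into that key; a minimal sketch (the spider name, key and URL are examples):

from scrapy_redis.spiders import RedisSpider

class MySpider(RedisSpider):
    name = 'myspider'
    redis_key = 'myspider:start_urls'  # matches defaults.START_URLS_KEY for this name

    def parse(self, response):
        yield {'url': response.url, 'title': response.css('title::text').get()}

# Start URLs can be pushed from any host, e.g. with redis-cli:
#   redis-cli lpush myspider:start_urls http://example.com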

utils.py

import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    # On Python 3, decode the bytes to str
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s

**To sum up the overall design of scrapy-redis:** the project rewrites the scheduler and spider classes so that scheduling, spider start-up and their interaction all go through one designated Redis instance. New dupefilter and queue classes move deduplication and the scheduling container into Redis as well. Because the crawler processes on every host talk to the same Redis database, scheduling and deduplication are managed centrally, which is what makes the crawl distributed.

When a spider is initialized, a matching scheduler object is initialized along with it; by reading the settings, the scheduler configures its own scheduling container (queue) and deduplication tool (dupefilter). Every time the spider produces a request, the Scrapy engine hands that request to the spider's scheduler, which checks it against Redis for duplicates and, if it is new, adds it to the scheduling pool in Redis. When the scheduling conditions are met, the scheduler takes a request out of the Redis pool and sends it to the spider to crawl. Once the spider has crawled all the currently available URLs and the scheduler finds that the spider's Redis pool is empty, the spider_idle signal fires; on receiving it, the spider reads the start-URL pool in Redis directly, takes a fresh batch of entry URLs, and the whole cycle above repeats.