import six
from scrapy.utils.misc import load_object
from . import defaults


# Shortcut maps 'setting name' -> 'parameter name'.
# (The mapping from Scrapy settings to redis connection parameters.)
SETTINGS_PARAMS_MAP = {
    'REDIS_URL': 'url',
    'REDIS_HOST': 'host',
    'REDIS_PORT': 'port',
    'REDIS_ENCODING': 'encoding',
}


def get_redis_from_settings(settings):
    # Builds the redis connection parameters from settings and returns a
    # client instance.
    """Returns a redis client instance from given Scrapy settings object.
This function uses ``get_client`` to instantiate the client and uses
``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters.
"""# 浅拷贝,是为了防止params改变,会导致默认的REDIS_PARAMS被改变
params = defaults.REDIS_PARAMS.copy()# 将settings中的参数更新到params
params.update(settings.getdict('REDIS_PARAMS'))# XXX: Deprecate REDIS_* settings.# 遍历映射表,获取指定的参数for source, dest in SETTINGS_PARAMS_MAP.items():# 优先使用settings中的参数
val = settings.get(source)# 如果settings中没有进行设置,则params不更新if val:
params[dest]= val
# Allow ``redis_cls`` to be a path to a class.ifisinstance(params.get('redis_cls'), six.string_types):
params['redis_cls']= load_object(params['redis_cls'])return get_redis(**params)# Backwards compatible alias.
from_settings = get_redis_from_settings


def get_redis(**kwargs):
    """Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""# 没有redis_cli,则默认redis连接
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url',None)# 判断kwargs有没有urlif url:#用url链接redis,优先使用url连接redisreturn redis_cls.from_url(url,**kwargs)else:#用字典的方式连接redisreturn redis_cls(**kwargs)
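That is the whole of connection.py. As a quick orientation, here is a minimal sketch (not part of the module) of obtaining a client from a Scrapy settings object; the host/port values and the extra ``db`` parameter are assumptions for a local Redis:

# --- usage sketch, not part of connection.py ---
from scrapy.settings import Settings
from scrapy_redis.connection import get_redis_from_settings

settings = Settings({
    'REDIS_HOST': 'localhost',        # assumed local Redis instance
    'REDIS_PORT': 6379,
    'REDIS_PARAMS': {'db': 0},        # extra parameters passed to the client
})
server = get_redis_from_settings(settings)
server.ping()                         # raises if Redis is unreachable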
import logging
import time
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
from . import defaults
from .connection import get_redis_from_settings


logger = logging.getLogger(__name__)


# Scrapy's built-in dedup keeps fingerprints in an in-memory set; here they
# are kept in a Redis set instead, so they can be shared.
# TODO: Rename class to RedisDupeFilter.
class RFPDupeFilter(BaseDupeFilter):
    """Redis-based request duplicates filter.
This class can also be used with default Scrapy's scheduler.
"""
    logger = logger

    def __init__(self, server, key, debug=False):
        """Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
    key : str
        Redis key where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
        """
        # ``server`` is the redis client; through it the filter reaches the
        # Redis set (and, in the scheduler, the request queues).
        self.server = server
        self.key = key
        self.debug = debug
        self.logdupes = True

    @classmethod
    def from_settings(cls, settings):
        """Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""# 获取redis的连接实例
server = get_redis_from_settings(settings)# XXX: This creates one-time key. needed to support to use this# class as standalone dupefilter with scrapy's default scheduler# if scrapy passes spider on open() method this wouldn't be needed# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.# 存取指纹的key
key = defaults.DUPEFILTER_KEY %{'timestamp':int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')# 默认值是false# 传给当前类,并把参数传递给init函数return cls(server, key=key, debug=debug)

    @classmethod
    def from_crawler(cls, crawler):
        """Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""return cls.from_settings(crawler.settings)defrequest_seen(self, request):"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
        """
        # Compute the fingerprint for this request.
        fp = self.request_fingerprint(request)
        # This returns the number of values added, zero if already exists.
        # SADD the fingerprint into the Redis set:
        #   self.server -- redis client instance
        #   self.key    -- key of the set that stores the fingerprints
        #   fp          -- the fingerprint itself
        added = self.server.sadd(self.key, fp)
        # added == 0 means the fingerprint was already in the set, i.e. the
        # request has been seen before.
        return added == 0

    def request_fingerprint(self, request):
        """Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""return request_fingerprint(request)

    @classmethod
    def from_spider(cls, spider):
        settings = spider.settings
        server = get_redis_from_settings(settings)
        dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY",
                                      defaults.SCHEDULER_DUPEFILTER_KEY)
        key = dupefilter_key % {'spider': spider.name}
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(server, key=key, debug=debug)

    # Called when the spider finishes; this is where the fingerprint data
    # gets wiped.
    def close(self, reason=''):
        """Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
        """
        self.clear()

    def clear(self):
        """Clears fingerprints data."""
        self.server.delete(self.key)

    # Logging of filtered (duplicate) requests.
    def log(self, request, spider):
        """Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""if self.debug:
msg ="Filtered duplicate request: %(request)s"
self.logger.debug(msg,{'request': request}, extra={'spider': spider})elif self.logdupes:
msg =("Filtered duplicate request %(request)s"" - no more duplicates will be shown"" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg,{'request': request}, extra={'spider': spider})
self.logdupes =False
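Since the class also works with Scrapy's default scheduler, turning it on standalone is just a settings change. A sketch of the relevant settings (the class path follows the upstream scrapy-redis layout; the REDIS_URL value is an assumption for a local instance):

# --- settings.py sketch ---
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
REDIS_URL = "redis://localhost:6379"    # assumed local Redis
DUPEFILTER_DEBUG = True                 # log every filtered duplicate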
from scrapy.utils.reqser import request_to_dict, request_from_dict

from . import picklecompat


class Base(object):
    """Per-spider base queue class"""

    def __init__(self, server, spider, key, serializer=None):
        """Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""if serializer isNone:# Backward compatibility.# TODO: deprecate pickle.
serializer = picklecompat
# 当序列化时没有laods函数时,就会抛出异常# 抛出异常的目的就是为了使传过来的序列化必须函数loads函数ifnothasattr(serializer,'loads'):raise TypeError("serializer does not implement 'loads' function: %r"% serializer)# 当序列化时没有dumps函数时,就会抛出异常ifnothasattr(serializer,'dumps'):raise TypeError("serializer '%s' does not implement 'dumps' function: %r"% serializer)# 下面的这些函数当类的所有函数,都可以使用
self.server = server
self.spider = spider
self.key = key %{'spider': spider.name}
self.serializer = serializer

    # Serialize a request into a string.
    def _encode_request(self, request):
        """Encode a request object"""
        # Convert the request into a dict...
        obj = request_to_dict(request, self.spider)
        # ...and serialize the dict into a string.
        return self.serializer.dumps(obj)

    # Deserialize a previously encoded request.
    def _decode_request(self, encoded_request):
        """Decode an request previously encoded"""
        # Rebuild the Request object so it can go straight to the downloader.
        obj = self.serializer.loads(encoded_request)
        return request_from_dict(obj, self.spider)

    # __len__, push and pop below must be overridden by subclasses,
    # otherwise the queue cannot be used.
    def __len__(self):
        """Return the length of the queue"""
        raise NotImplementedError

    def push(self, request):
        """Push a request"""
        raise NotImplementedError

    def pop(self, timeout=0):
        """Pop a request"""
        raise NotImplementedError
    # Delete everything stored under self.key.
    def clear(self):
        """Clear queue/stack"""
        self.server.delete(self.key)


# FIFO queue: first in, first out, backed by a Redis list.
class FifoQueue(Base):
    """Per-spider FIFO queue"""

    def __len__(self):
        """Return the length of the queue"""
        return self.server.llen(self.key)

    def push(self, request):
        """Push a request"""
        # Insert at the head of the list.
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        if timeout > 0:
            # Blocking pop from the tail of the list.
            data = self.server.brpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            # Non-blocking pop, also from the tail.
            data = self.server.rpop(self.key)
        if data:
            # Decode the popped element back into a Request for the
            # downloader.
            return self._decode_request(data)


# Priority queue: every request is stored with a score in a sorted set, and
# the highest-priority (lowest-score) request is popped first.
class PriorityQueue(Base):
    """Per-spider priority queue abstraction using redis' sorted set"""

    def __len__(self):
        """Return the length of the queue"""
        return self.server.zcard(self.key)

    def push(self, request):
        """Push a request"""
        data = self._encode_request(request)
        score = -request.priority
        # We don't use zadd method as the order of arguments change depending on
        # whether the class is Redis or StrictRedis, and the option of using
        # kwargs only accepts strings, not bytes.
        # The sorted set is what implements the priority ordering.
        self.server.execute_command('ZADD', self.key, score, data)

    def pop(self, timeout=0):
        """
        Pop a request

        Timeout is not supported in this queue class.
        """
        # Use atomic range/remove using multi/exec. ``pipeline()`` gives a
        # command buffer on self.server that is executed as one transaction.
        pipe = self.server.pipeline()
        pipe.multi()
        # ZRANGE returns the member with the lowest score (highest priority);
        # ZREMRANGEBYRANK then removes that same member.
        pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
        # Executing the pipeline removes the request and returns it in one
        # round trip: ``results`` is the popped member list, ``count`` is the
        # number of removed elements (1 or 0).
        results, count = pipe.execute()
        if results:
            # ``results`` is a list; decode its first element back into a
            # Request.
            return self._decode_request(results[0])
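The ZRANGE plus ZREMRANGEBYRANK pair is easier to see against a bare redis-py client. A small sketch (key name and payloads are made up; assumes a local Redis):

# --- priority pop sketch, not part of queue.py ---
import redis

server = redis.StrictRedis()            # assumes Redis on localhost:6379
key = 'myspider:requests'               # hypothetical queue key

# Lower score pops first, so priority 10 (score -10) beats priority 0.
server.execute_command('ZADD', key, 0, b'low-priority-request')
server.execute_command('ZADD', key, -10, b'high-priority-request')

pipe = server.pipeline()
pipe.multi()
pipe.zrange(key, 0, 0).zremrangebyrank(key, 0, 0)
results, count = pipe.execute()
print(results[0])                       # b'high-priority-request'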


# LIFO queue: last in, first out (a stack), also backed by a Redis list.
class LifoQueue(Base):
    """Per-spider LIFO queue."""

    def __len__(self):
        """Return the length of the stack"""
        return self.server.llen(self.key)

    def push(self, request):
        """Push a request"""
        self.server.lpush(self.key, self._encode_request(request))

    def pop(self, timeout=0):
        """Pop a request"""
        if timeout > 0:
            # Blocking pop from the head, i.e. the most recently pushed item.
            data = self.server.blpop(self.key, timeout)
            if isinstance(data, tuple):
                data = data[1]
        else:
            data = self.server.lpop(self.key)
        if data:
            return self._decode_request(data)


# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
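Which of the three queue classes the scheduler instantiates is controlled by SCHEDULER_QUEUE_CLASS. A sketch of the setting, using the module path the upstream project ships:

# --- settings.py sketch ---
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'   # default
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'     # breadth-first order
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'     # depth-first order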
import importlib
import six
from scrapy.utils.misc import load_object
from . import connection, defaults


# TODO: add SCRAPY_JOB support.
# FIXME
class Scheduler(object):
    """Redis-based scheduler
Settings
--------
SCHEDULER_PERSIST : bool (default: False)
Whether to persist or clear redis queue.
SCHEDULER_FLUSH_ON_START : bool (default: False)
Whether to flush redis queue on start.
SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
How many seconds to wait before closing if no message is received.
SCHEDULER_QUEUE_KEY : str
Scheduler redis key.
SCHEDULER_QUEUE_CLASS : str
Scheduler queue class.
SCHEDULER_DUPEFILTER_KEY : str
Scheduler dupefilter redis key.
SCHEDULER_DUPEFILTER_CLASS : str
Scheduler dupefilter class.
SCHEDULER_SERIALIZER : str
Scheduler serializer.
"""def__init__(self, server,
persist=False,
flush_on_start=False,
queue_key=defaults.SCHEDULER_QUEUE_KEY,
queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
idle_before_close=0,
serializer=None):"""Initialize scheduler.
Parameters
----------
server : Redis
The redis server instance.
persist : bool
Whether to flush requests when closing. Default is False.
flush_on_start : bool
Whether to flush requests on start. Default is False.
queue_key : str
Requests queue key.
queue_cls : str
Importable path to the queue class.
dupefilter_key : str
Duplicates filter key.
dupefilter_cls : str
Importable path to the dupefilter class.
idle_before_close : int
Timeout before giving up.
"""if idle_before_close <0:raise TypeError("idle_before_close cannot be negative")
self.server = server
self.persist = persist
self.flush_on_start = flush_on_start
self.queue_key = queue_key
self.queue_cls = queue_cls
self.dupefilter_cls = dupefilter_cls
self.dupefilter_key = dupefilter_key
self.idle_before_close = idle_before_close
self.serializer = serializer
self.stats =Nonedef__len__(self):returnlen(self.queue)

    # Entry point when Scrapy builds the scheduler.
    @classmethod
    def from_settings(cls, settings):
        kwargs = {
            # Whether the queue is kept in redis after the crawl finishes.
            'persist': settings.getbool('SCHEDULER_PERSIST'),
            # Whether the queue is emptied when the crawl starts.
            'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
            'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
        }

        # If these values are missing, it means we want to use the defaults.
        optional = {
            # TODO: Use custom prefixes for this settings to note that are
            # specific to scrapy-redis.
            'queue_key': 'SCHEDULER_QUEUE_KEY',
            'queue_cls': 'SCHEDULER_QUEUE_CLASS',  # priority queue by default
            'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',  # dedup key
            # We use the default setting name to keep compatibility.
            'dupefilter_cls': 'DUPEFILTER_CLASS',
            'serializer': 'SCHEDULER_SERIALIZER',
        }
        for name, setting_name in optional.items():
            val = settings.get(setting_name)
            if val:
                kwargs[name] = val

        # Support serializer as a path to a module.
        if isinstance(kwargs.get('serializer'), six.string_types):
            kwargs['serializer'] = importlib.import_module(kwargs['serializer'])

        # The redis connection instance.
        server = connection.from_settings(settings)
        # Ensure the connection is working.
        server.ping()

        return cls(server=server, **kwargs)

    @classmethod
    def from_crawler(cls, crawler):
        instance = cls.from_settings(crawler.settings)
        # FIXME: for now, stats are only supported from this constructor
        instance.stats = crawler.stats
        return instance

    def open(self, spider):
        self.spider = spider

        try:
            self.queue = load_object(self.queue_cls)(
                server=self.server,
                spider=spider,
                key=self.queue_key % {'spider': spider.name},
                serializer=self.serializer,
            )
        except TypeError as e:
            raise ValueError("Failed to instantiate queue class '%s': %s"
                             % (self.queue_cls, e))

        self.df = load_object(self.dupefilter_cls).from_spider(spider)

        if self.flush_on_start:
            self.flush()
        # notice if there are requests already in the queue to resume the crawl
        if len(self.queue):
            spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))

    def close(self, reason):
        if not self.persist:
            self.flush()

    def flush(self):
        self.df.clear()
        self.queue.clear()

    # Enqueue a request.
    def enqueue_request(self, request):
        # self.df.request_seen(request) returns True when the fingerprint is
        # already stored, i.e. the request has been seen before;
        # request.dont_filter is False by default. A filterable request that
        # has been seen is rejected here.
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        # stats is usually None unless built via from_crawler.
        if self.stats:
            self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
        self.queue.push(request)
        return True

    # Dequeue the next request.
    def next_request(self):
        block_pop_timeout = self.idle_before_close
        # Pop one request from the queue.
        request = self.queue.pop(block_pop_timeout)
        if request and self.stats:
            self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
        # The request goes back to the engine, which hands it to the
        # downloader to fetch the page.
        return request

    def has_pending_requests(self):
        return len(self) > 0
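Wiring the scheduler, dupefilter and queue into a project is done entirely through settings. A sketch of a typical configuration (names follow the scrapy-redis README; the REDIS_URL is an assumption for a local instance):

# --- settings.py sketch ---
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER_PERSIST = True                # keep queue and fingerprints across runs
SCHEDULER_FLUSH_ON_START = False        # do not wipe the queue on start
REDIS_URL = "redis://localhost:6379"    # assumed local Redis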
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider
from . import connection, defaults
from .utils import bytes_to_str


class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""

    # Redis key that holds the start URLs for this spider.
    redis_key = None
    # How many start requests to fetch from redis in one batch.
    redis_batch_size = None
    # Encoding used when decoding messages from redis.
    redis_encoding = None

    # Redis client placeholder.
    server = None

    # start_requests is overridden so the start requests come from redis via
    # next_requests.
    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""if self.server isnotNone:returnif crawler isNone:# We allow optional crawler argument to keep backwards# compatibility.# XXX: Raise a deprecation warning.
crawler =getattr(self,'crawler',None)if crawler isNone:raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key isNone:
self.redis_key = settings.get('REDIS_START_URLS_KEY', defaults.START_URLS_KEY,)
self.redis_key = self.redis_key %{'name': self.name}ifnot self.redis_key.strip():raise ValueError("redis_key must not be empty")if self.redis_batch_size isNone:# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint('REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),)try:
self.redis_batch_size =int(self.redis_batch_size)except(TypeError, ValueError):raise ValueError("redis_batch_size must be an integer")if self.redis_encoding isNone:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' ""(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)#redis连接实例
self.server = connection.from_settings(crawler.settings)# The idle signal is called when the spider has no requests left,# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)defnext_requests(self):"""Returns a request to be scheduled or none."""# 默认使用redis_keys是一个列表,否则是集合
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        # Fetch start URLs from redis:
        #   use_set = False -> self.server.lpop (list data type)
        #   use_set = True  -> self.server.spop (set data type)
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        while found < self.redis_batch_size:
            # Pop one start URL from the database.
            data = fetch_one(self.redis_key)
            if not data:
                # Queue empty.
                break
            # ``data`` is bytes; make_request_from_data decodes it to str for
            # Python 3 compatibility.
            req = self.make_request_from_data(data)
            if req:
                # Hand the request over to the scheduler.
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    # Builds a Request instance from the data coming out of redis.
    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider
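With the mixin in place, kicking off a crawl amounts to pushing URLs under the spider's redis_key. A sketch with redis-py (spider name and URL are placeholders; the key follows the default '<name>:start_urls' pattern):

# --- seeding sketch, not part of spiders.py ---
import redis

server = redis.StrictRedis()            # assumes Redis on localhost:6379
server.lpush('myspider:start_urls', 'https://example.com/page1')
# With REDIS_START_URLS_AS_SET = True you would use SADD instead:
# server.sadd('myspider:start_urls', 'https://example.com/page1')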


class RedisSpider(RedisMixin, Spider):
    """Spider that reads urls from redis queue when idle.
Attributes
----------
redis_key : str (default: REDIS_START_URLS_KEY)
Redis key where to fetch start URLs from.
redis_batch_size : int (default: CONCURRENT_REQUESTS)
Number of messages to fetch from redis on each attempt.
redis_encoding : str (default: REDIS_ENCODING)
Encoding to use when decoding messages from redis queue.
Settings
--------
REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
Default Redis key where to fetch start URLs from.
REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
Default number of messages to fetch from redis on each attempt.
REDIS_START_URLS_AS_SET : bool (default: False)
Use SET operations to retrieve messages from the redis queue. If False,
the messages are retrieved using the LPOP command.
REDIS_ENCODING : str (default: "utf-8")
Default encoding to use when decoding messages from redis queue.
"""

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
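A minimal spider built on this class could look like the following sketch (spider name, key and parse logic are placeholders):

# --- example spider, not part of spiders.py ---
from scrapy_redis.spiders import RedisSpider


class MySpider(RedisSpider):
    name = 'myspider'
    redis_key = 'myspider:start_urls'   # optional; defaults to '<name>:start_urls'

    def parse(self, response):
        yield {'url': response.url, 'title': response.css('title::text').get()}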


class RedisCrawlSpider(RedisMixin, CrawlSpider):
    """Spider that reads urls from redis queue when idle.
Attributes
----------
redis_key : str (default: REDIS_START_URLS_KEY)
Redis key where to fetch start URLs from.
redis_batch_size : int (default: CONCURRENT_REQUESTS)
Number of messages to fetch from redis on each attempt.
redis_encoding : str (default: REDIS_ENCODING)
Encoding to use when decoding messages from redis queue.
Settings
--------
REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
Default Redis key where to fetch start URLs from.
REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
Default number of messages to fetch from redis on each attempt.
REDIS_START_URLS_AS_SET : bool (default: True)
Use SET operations to retrieve messages from the redis queue.
REDIS_ENCODING : str (default: "utf-8")
Default encoding to use when decoding messages from redis queue.
"""

    @classmethod
    def from_crawler(self, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, self).from_crawler(crawler, *args, **kwargs)
        obj.setup_redis(crawler)
        return obj
utils.py
import six


def bytes_to_str(s, encoding='utf-8'):
    """Returns a str if a bytes object is given."""
    # Convert bytes to str (redis returns bytes on Python 3).
    if six.PY3 and isinstance(s, bytes):
        return s.decode(encoding)
    return s
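For completeness, the helper behaves like this small sketch:

print(bytes_to_str(b'https://example.com'))   # -> 'https://example.com'
print(bytes_to_str('already-a-str'))          # -> returned unchanged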