Scrapy's Item Pipeline: pitfalls hit while crawling 360 Images

Goal: crawl the first 50 pages of 360 Images and save the data both to a local CSV file and to MongoDB. Downloading is asynchronous and multi-threaded, and image downloads are handled by subclassing the built-in ImagesPipeline. The point of the exercise is to properly understand the Item Pipeline component.

Key points
Error on the first run:
ImportError: No module named 'PIL'

Fix it by installing the Pillow package:
pip install -i https://pypi.douban.com/simple pillow
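
The ImagesPipeline relies on Pillow for image processing, so a quick way to confirm the install took effect (a trivial check, not from the original post):

# Sanity check: this raises the same ImportError if Pillow is still missing.
from PIL import Image
print('Pillow OK:', Image.__name__)   # prints 'Pillow OK: PIL.Image'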

Note that the crawled URL returns JSON; you can see the requests under the XHR tab of the browser's developer tools.
The page we are targeting is https://image.so.com/z?ch=photography
For example:
Request URL:
https://image.so.com/zj?ch=photography&sn=30&listtype=new&temp=1
The base_url to extract is https://image.so.com/zj?, not the page address https://image.so.com/z?. This one tripped me up: the sites I had crawled before were not Ajax-based, so keep it in mind going forward.
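
Before wiring this into Scrapy, you can sanity-check that the endpoint really returns JSON. A minimal standalone sketch using the requests library (my own addition, assuming the endpoint still responds as captured above):

import json
from urllib.parse import urlencode

import requests

# Build the same Ajax URL the spider will request; sn=30 asks for
# the second batch of 30 images.
params = urlencode({'ch': 'photography', 'sn': 30, 'listtype': 'new'})
url = 'https://image.so.com/zj?' + params

resp = requests.get(url, timeout=10)
data = json.loads(resp.text)
print(len(data.get('list', [])), 'images in this batch')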
The main code:

images.py

# -*- coding: utf-8 -*-

import scrapy
from scrapy import Spider, Request
from urllib.parse import urlencode
import json

from images360.items import ImageItem

class ImagesSpider(scrapy.Spider):
    name = 'images'
    allowed_domains = ['images.so.com']
    start_urls = ['https://images.so.com/']

    def parse(self, response):
        result = json.loads(response.text)
        for image in result.get('list'):
            item = ImageItem()
            item['id'] = image.get('imageid')
            item['url'] = image.get('qhimg_url')
            item['title'] = image.get('group_title')
            item['thumb'] = image.get('qhimg_thumb_url')
            yield item

    def start_requests(self):
        data = {'ch': 'photography', 'listtype': 'new'}
        base_url = 'https://image.so.com/zj?'

        for page in range(1, self.settings.get('MAX_PAGE') + 1):  # double-check this line; I misspelled the setting in settings.py and the error message saved me
            data['sn'] = page * 30
            # urlencode (from urllib.parse) turns the dict into URL GET parameters
            params = urlencode(data)
            url = base_url + params
            # Request is imported from scrapy
            yield Request(url, self.parse)
            # remember to set ROBOTSTXT_OBEY = False in settings.py
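
For reference, the loop above produces request URLs like these (a standalone sketch of the same urlencode logic; note that sn is an item offset, not a page number):

from urllib.parse import urlencode

base_url = 'https://image.so.com/zj?'
data = {'ch': 'photography', 'listtype': 'new'}

for page in range(1, 4):
    data['sn'] = page * 30   # page 1 -> sn=30, page 2 -> sn=60, ...
    print(base_url + urlencode(data))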

items.py

from scrapy import Item, Field

class ImageItem(Item):
    collection = 'images'  # MongoDB collection name
    id = Field()
    url = Field()
    title = Field()
    # thumbnail
    thumb = Field()
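
An Item behaves like a dict restricted to its declared Fields, which is why the pipelines below can call dict(item). A quick illustration (the values are made up):

item = ImageItem()
item['id'] = '12345'
item['url'] = 'https://p0.qhimg.com/example.jpg'
print(dict(item))       # {'id': '12345', 'url': 'https://p0.qhimg.com/example.jpg'}
# item['foo'] = 'bar'   # would raise KeyError: only declared Fields are allowed

Note that collection is a plain class attribute rather than a Field, so MongoPipeline reads it as item.collection and it never appears in dict(item).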

pipelines.py
import pymongo
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline

class MongoPipeline(object):
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # pull connection settings from settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        name = item.collection
        # insert_one: insert() is deprecated in pymongo 3.x and removed in 4.x
        self.db[name].insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
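
The goal at the top also mentions saving to CSV, but the post never shows that pipeline. Here is a minimal sketch of what one could look like, standard library only (the class name and output file name are my own, not from the original project):

import csv

class CsvPipeline(object):
    def open_spider(self, spider):
        # newline='' avoids blank rows on Windows
        self.file = open('images.csv', 'w', newline='', encoding='utf-8')
        self.writer = csv.DictWriter(
            self.file, fieldnames=['id', 'url', 'title', 'thumb'])
        self.writer.writeheader()

    def process_item(self, item, spider):
        self.writer.writerow(dict(item))
        return item

    def close_spider(self, spider):
        self.file.close()

To enable it, register it in ITEM_PIPELINES alongside the other two pipelines.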

class ImagePipeline(ImagesPipeline):
    def file_path(self, request, response=None, info=None):
        # name the saved file after the last segment of its URL
        url = request.url
        file_name = url.split('/')[-1]
        return file_name

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem('Image Downloaded Failed')
        return item

    def get_media_requests(self, item, info):
        yield Request(item['url'])
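
For context on item_completed above: Scrapy calls it with results, a list of (success, info) two-tuples, one per Request yielded by get_media_requests. Roughly what the list comprehension sees (values are illustrative, not real output):

results = [
    (True, {'url': 'https://p0.qhimg.com/a.jpg',  # original image URL
            'path': 'a.jpg',                      # path relative to IMAGES_STORE
            'checksum': 'abc123'}),               # MD5 of the downloaded file
    (False, Exception('download failed')),        # Scrapy actually passes a Failure here
]
image_paths = [x['path'] for ok, x in results if ok]
print(image_paths)   # ['a.jpg']; an empty list means every download failed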

Add the following to settings.py:

ROBOTSTXT_OBEY = False  # without this the site cannot be crawled
ITEM_PIPELINES = {  # register every pipeline you use
    'images360.pipelines.ImagePipeline': 300,
    'images360.pipelines.MongoPipeline': 301,
}

IMAGES_STORE = './images'

MAX_PAGE = 50

MONGO_URI = 'localhost'
MONGO_DB = 'images360'

Full code from the original author, which also includes MySQL storage:
https://github.com/Python3WebSpider/Images360/tree/master/images360

Reference:
https://blog.csdn.net/kuangshp128/article/details/80321099
