A Python News Crawler Based on the Scrapy Framework

I. Task Requirements

1. Crawl articles and their comments from a news website

2. Cover at least 100,000 news pages

3. Each news page and its comments must be refreshable within one day

II. Functional Design

1. Design a web crawler that can crawl all pages of a given site and extract the articles and comments from them

2. Run the crawler on a schedule so that the data is updated daily

III. System Architecture

First, a brief introduction to Scrapy, the crawling framework this project is built on.


The green lines in Scrapy's architecture diagram mark the data flow:

(1) Starting from the initial URLs, the Scheduler hands requests to the Downloader to be fetched.

(2) Each downloaded response is passed to the Spider for parsing; the Spider holds the core crawling logic.

(3) The Spider produces two kinds of results: links that need further crawling, which are passed back through the middleware to the Scheduler, and data that needs to be saved, which is sent to the Item Pipeline for processing and storage.

(4) Finally, all the data is written out and saved to files.
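
To make the data flow concrete, here is a minimal, self-contained spider sketch (not this project's spider; example.com and the XPaths are placeholders) that produces both kinds of output mentioned in step (3): follow-up requests, which go back to the Scheduler, and items, which go to the Item Pipeline.

# Minimal sketch of the two kinds of Spider output described above.
# "example.com" and the XPaths are placeholders for illustration only.
import scrapy

class MinimalSpider(scrapy.Spider):
    name = "minimal"
    start_urls = ["http://example.com/"]

    def parse(self, response):
        # Result type 1: follow-up links are handed back to the Scheduler.
        for href in response.xpath('//a/@href').getall():
            yield response.follow(href, callback=self.parse)
        # Result type 2: extracted data is sent to the Item Pipeline.
        yield {"url": response.url,
               "title": response.xpath('//title/text()').get()}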

IV. Implementation

Spider

# -*- coding:utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from ..items import newsItem
from scrapy.linkextractors import LinkExtractor
import re, requests, json
from scrapy.selector import Selector
count = 0  # global counter used as a running article ID

class news163_Spider(CrawlSpider):
    # Name of the NetEase news spider
    name = "163news"
    # Disguise the crawler as a regular browser
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
    }
    # The whole 163.com domain is allowed
    allowed_domains = [
        "163.com"
    ]
    # Start from the news channel
    start_urls = [
        'http://news.163.com/'
    ]
    # URL patterns that may be followed further, e.g. http://news.163.com/\d\d\d\d\d(/([\w\._+-])*)*$
    rules = [
        Rule(LinkExtractor(
        allow=(
            ('http://news\.163\.com/.*$'),
            ('http://ent\.163\.com/.*$'),
            ('http://money\.163\.com/.*$'),
            ('http://war\.163\.com/.*$'),
            ('http://sport\.163\.com/.*$'),
            ('http://tech\.163\.com/.*$'),
            ('http://fashion\.163\.com/.*$'),
            ('http://auto\.163\.com/.*$'),
            ('http://jiankang\.163\.com/.*$')
        ),
        deny = ('http://.*.163.com/photo.*$')
        ),
        callback="parse_item",
        follow=True)
    ]
    def parse_item(self, response):
        # response is the downloaded page for the current URL
        article = Selector(response)
        article_url = response.url
        global count
        # Decide which page layout this is
        # Newer NetEase news pages, e.g. http://news.163.com/05-17/
        if get_category(article) == 1:
            articleXpath = '//*[@id="epContentLeft"]'
            if article.xpath(articleXpath):
                titleXpath = '//*[@id="epContentLeft"]/h1/text()'
                dateXpath = '//*[@id="epContentLeft"]/div[1]/text()'
                contentXpath = '//*[@id="endText"]'
                news_infoXpath ='//*[@id="post_comment_area"]/script[3]/text()'

                # Title
                if article.xpath(titleXpath):
                    news_item = newsItem()
                    news_item['url'] = article_url
                    get_title(article, titleXpath, news_item)
                    # Date
                    if article.xpath(dateXpath):
                        get_date(article, dateXpath, news_item)
                    # Body
                    if article.xpath(contentXpath):
                        get_content(article, contentXpath, news_item)
                        count = count + 1
                        news_item['id'] = count
                    # Comments
                    try:
                        comment_url = get_comment_url(article, news_infoXpath)
                        # Fetch and parse the comments
                        comments = get_comment(comment_url, news_item)[1]
                        news_item['comments'] = comments
                    except:
                        news_item['comments'] = ' '
                        news_item['heat'] = 0
                    yield news_item

        # Older NetEase news pages, e.g. http://news.163.com/40706/
        if get_category(article) == 2:
            articleXpath = '/html/body/table[9]/tr/td[1]'
            if article.xpath(articleXpath):
                titleXpath = '/html/body/table[9]/tr/td[1]/table[1]/tr[1]/td/text()'
                dateXpath = '/html/body/table[9]/tr/td[1]/table[1]/tr[2]/td[2]/table/tbody/tr[2]/td[1]/text()[1]'
                contentXpath = '//*[@id="content"]'
                news_item = newsItem()
                news_item['url'] = article_url
                # Title
                if article.xpath(titleXpath):
                    get_title(article, titleXpath, news_item)
                    # Date
                    if article.xpath(dateXpath):
                        get_date(article, dateXpath, news_item)
                    # Body
                    if article.xpath(contentXpath):
                        get_content(article, contentXpath, news_item)
                        count = count + 1
                        news_item['id'] = count
                        news_item['heat'] = 0
                        news_item['comments'] = ' '
                yield news_item

'''Generic title extraction helper'''
def get_title(article, titleXpath, news_item):
    # Title
    try:
        article_title = article.xpath(titleXpath).extract()[0]
        article_title = article_title.replace('\n', '')
        article_title = article_title.replace('\r', '')
        article_title = article_title.replace('\t', '')
        article_title = article_title.replace(' ', '')
        news_item['title'] = article_title
    except:
        news_item['title'] = ' '


'''Generic date extraction helper'''
def get_date(article, dateXpath, news_item):
    # Publication time
    try:
        article_date = article.xpath(dateXpath).extract()[0]
        pattern = re.compile("(\d.*\d)")  # regex that matches the news timestamp
        article_datetime = pattern.findall(article_date)[0]
        #article_datetime = datetime.datetime.strptime(article_datetime, "%Y-%m-%d %H:%M:%S")
        news_item['date'] = article_datetime
    except:
        news_item['date'] = '2010-10-01 17:00:00'
'''Page-type classification helper'''
def get_category(article):
    if article.xpath('//*[@id="epContentLeft"]'):
        case = 1  # newer NetEase news layout
        return case

    elif article.xpath('/html/body/table[9]/tr/td[1]'):
        case = 2  # NetEase news layout from the early 2000s
        return case

'''Character filtering helper'''
def str_replace(content):
    # article_content = ' '.join(content)
    # rule = re.compile('\w')
    try:
        article_content = re.sub('[\sa-zA-Z\[\]!/*(^)$%~@#…&¥—+=_<>.{}\'\-:;"‘’|]', '', content)
        return article_content
    except:
        return content

'''Generic body extraction helper'''
def get_content(article, contentXpath, news_item):
    try:
        content_data = article.xpath(contentXpath)
        article_content = content_data.xpath('string(.)').extract()[0]
        article_content = str_replace(article_content)
        news_item['content'] = article_content
        # Abstract: the first 100 characters of the body
        try:
            abstract = article_content[0:100]
            news_item['abstract'] = abstract
        except Exception:
            news_item['abstract'] = article_content
        # Alternative: cut the abstract at the first full stop instead
        #     index = article_content.find('。')
        #     abstract = article_content[0:index]
        #     news_item['abstract'] = abstract
    except:
        news_item['content'] = ' '
        news_item['abstract'] = ' '

'''Comment-URL construction helper'''
def get_comment_url(article, news_infoXpath):
    news_info = article.xpath(news_infoXpath)
    news_info_text = news_info.extract()[0]
    # Pull productKey and docId out of the inline script on the article page
    pattern_productKey = re.compile("\"productKey\" :.*")
    productKey_text = pattern_productKey.findall(news_info_text)[0]
    productKey = re.findall(r"\"productKey\".*\"(.*)\"", productKey_text)
    pattern_docId = re.compile("\"docId\" :.*")
    docId_text = pattern_docId.findall(news_info_text)[0]
    docId = re.findall(r"\"docId\".*\"(.*)\"", docId_text)
    # Piece the two keys together into the comment API URL
    comment_url = 'http://comment.news.163.com/api/v1/products/' + productKey[0] + '/threads/' + docId[0] + '/comments/newList?offset=0'
    return comment_url

'''Comment fetching and parsing helper'''
def get_comment(comment_url, news_item):
    comments = []
    comment_id = 0
    try:
        comment_data = requests.get(comment_url).text
        js_comment = json.loads(comment_data)
        try:
            heat = js_comment['newListSize']
            news_item['heat'] = heat
            js_comments = js_comment['comments']
            for each, value in js_comments.items():
                comment_id += 1
                comments_dict = {}
                # Comment id
                comments_dict['id'] = comment_id
                # Commenter's user name
                try:
                    comments_dict['username'] = value['user']['nickname']
                except:
                    comments_dict['username'] = '匿名用户'
                try:
                    # Comment time, as a datetime-style string
                    date_time = value['createTime']
                    #date_time = datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
                    comments_dict['date_time'] = date_time
                except:
                    comments_dict['date_time'] = news_item['date']
                # Comment content
                ori_content = value['content']
                content = str_replace(ori_content)
                comments_dict['content'] = content
                comments.append(comments_dict)
            # Return only once all comments have been collected
            if comments:
                return heat, comments
            else:
                return 0, ''
        except:
            return 0, ''
    except:
        return 0, ''


The code is explained piece by piece below.

rules = [
        Rule(LinkExtractor(
        allow=(
            ('http://news\.163\.com/.*$'),
            ('http://ent\.163\.com/.*$'),
            ('http://money\.163\.com/.*$'),
            ('http://war\.163\.com/.*$'),
            ('http://sport\.163\.com/.*$'),
            ('http://tech\.163\.com/.*$'),
            ('http://fashion\.163\.com/.*$'),
            ('http://auto\.163\.com/.*$'),
            ('http://jiankang\.163\.com/.*$')
        ),
        deny = ('http://.*.163.com/photo.*$')
        ),
        callback="parse_item",
        follow=True)
    ]
These are the crawler's link-following rules: the LinkExtractor finds and follows only links that match the allow patterns and skips anything matching deny (here, only certain channels are allowed and photo-gallery URLs are excluded). callback="parse_item" means the response for every matched URL is handed to the parse_item method for processing, and follow=True means the crawler keeps following links found on each crawled page, not just the links on the start pages, until a stop condition is reached; follow=False would restrict crawling to the links on the start pages.
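
As a side note, the same allow/deny patterns can be tried out on their own; a small sketch follows (the HTML below is fabricated purely for illustration).

from scrapy.http import HtmlResponse
from scrapy.linkextractors import LinkExtractor

# Fabricated page with three links: an article, a photo gallery, and a blog post.
html = b'''
<a href="http://news.163.com/18/0101/10/some-article.html">article</a>
<a href="http://news.163.com/photo/12345.html">photo gallery</a>
<a href="http://blog.163.com/xyz.html">blog</a>
'''
response = HtmlResponse(url='http://news.163.com/', body=html, encoding='utf-8')

extractor = LinkExtractor(allow=(r'http://news\.163\.com/.*$',),
                          deny=(r'http://.*\.163\.com/photo.*$',))
for link in extractor.extract_links(response):
    print(link.url)   # only the first link survives the allow/deny filter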

def parse_item(self, response):
        # response is the downloaded page for the current URL
        article = Selector(response)
        article_url = response.url
        global count
        # Decide which page layout this is
        # Newer NetEase news pages, e.g. http://news.163.com/05-17/
        if get_category(article) == 1:
            articleXpath = '//*[@id="epContentLeft"]'
            if article.xpath(articleXpath):
                titleXpath = '//*[@id="epContentLeft"]/h1/text()'
                dateXpath = '//*[@id="epContentLeft"]/div[1]/text()'
                contentXpath = '//*[@id="endText"]'
                news_infoXpath ='//*[@id="post_comment_area"]/script[3]/text()'

                # Title
                if article.xpath(titleXpath):
                    news_item = newsItem()
                    news_item['url'] = article_url
                    get_title(article, titleXpath, news_item)
                    # Date
                    if article.xpath(dateXpath):
                        get_date(article, dateXpath, news_item)
                    # Body
                    if article.xpath(contentXpath):
                        get_content(article, contentXpath, news_item)
                        count = count + 1
                        news_item['id'] = count
                    # Comments
                    try:
                        comment_url = get_comment_url(article, news_infoXpath)
                        # Fetch and parse the comments
                        comments = get_comment(comment_url, news_item)[1]
                        news_item['comments'] = comments
                    except:
                        news_item['comments'] = ' '
                        news_item['heat'] = 0
                    yield news_item

Because NetEase article pages do not all share one layout, pages have to be classified before extraction. Judging by their XPath structure, the site's pages fall into a handful of types, of which only the two main ones are handled here. The XPaths of the various page elements were looked up with the browser's F12 developer tools and are then used to locate the content.
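
For reference, the XPaths above are easy to sanity-check outside the spider by loading a saved article page into a Selector; a minimal sketch (sample_article.html is a placeholder for any locally saved NetEase article):

from scrapy.selector import Selector

# Load a locally saved article page; "sample_article.html" is a placeholder.
with open('sample_article.html', encoding='utf-8') as f:
    sel = Selector(text=f.read())

# On a page with the newer layout these should print non-empty results.
print(sel.xpath('//*[@id="epContentLeft"]/h1/text()').extract())
print(sel.xpath('//*[@id="endText"]').xpath('string(.)').extract()[:1])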

'''Comment-URL construction helper'''
def get_comment_url(article, news_infoXpath):
    news_info = article.xpath(news_infoXpath)
    news_info_text = news_info.extract()[0]
    # Pull productKey and docId out of the inline script on the article page
    pattern_productKey = re.compile("\"productKey\" :.*")
    productKey_text = pattern_productKey.findall(news_info_text)[0]
    productKey = re.findall(r"\"productKey\".*\"(.*)\"", productKey_text)
    pattern_docId = re.compile("\"docId\" :.*")
    docId_text = pattern_docId.findall(news_info_text)[0]
    docId = re.findall(r"\"docId\".*\"(.*)\"", docId_text)
    # Piece the two keys together into the comment API URL
    comment_url = 'http://comment.news.163.com/api/v1/products/' + productKey[0] + '/threads/' + docId[0] + '/comments/newList?offset=0'
    return comment_url

'''Comment fetching and parsing helper'''
def get_comment(comment_url, news_item):
    comments = []
    comment_id = 0
    try:
        comment_data = requests.get(comment_url).text
        js_comment = json.loads(comment_data)
        try:
            heat = js_comment['newListSize']
            news_item['heat'] = heat
            js_comments = js_comment['comments']
            for each, value in js_comments.items():
                comment_id += 1
                comments_dict = {}
                # Comment id
                comments_dict['id'] = comment_id
                # Commenter's user name
                try:
                    comments_dict['username'] = value['user']['nickname']
                except:
                    comments_dict['username'] = '匿名用户'
                try:
                    # Comment time, as a datetime-style string
                    date_time = value['createTime']
                    #date_time = datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
                    comments_dict['date_time'] = date_time
                except:
                    comments_dict['date_time'] = news_item['date']
                # Comment content
                ori_content = value['content']
                content = str_replace(ori_content)
                comments_dict['content'] = content
                comments.append(comments_dict)
            # Return only once all comments have been collected
            if comments:
                return heat, comments
            else:
                return 0, ''
        except:
            return 0, ''
    except:
        return 0, ''
The comments are generated dynamically by JavaScript and do not appear in the page source, so they cannot be extracted with XPath. A small workaround is used instead: the browser's developer tools reveal the request that loads the comments, the structure of that comment-API URL is analyzed, and the keys it needs (productKey and docId) are located in the article's page source and pieced together into the full URL.
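
For completeness, the resulting API call can also be tried outside Scrapy; a quick sketch (the productKey and docId values are placeholders, and the endpoint is assumed to still respond as it did when this project was written):

import json
import requests

# Placeholders; real values are read from the article page by get_comment_url().
product_key = 'PLACEHOLDER_PRODUCT_KEY'
doc_id = 'PLACEHOLDER_DOC_ID'
comment_url = ('http://comment.news.163.com/api/v1/products/' + product_key +
               '/threads/' + doc_id + '/comments/newList?offset=0')

data = json.loads(requests.get(comment_url).text)
# The spider relies on the 'newListSize' and 'comments' keys of this JSON.
print(list(data.keys()))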

Item

from scrapy import Item, Field

class newsItem(Item):  # one news article
    # Article title
    title = Field()
    # Publication time
    date = Field()
    # Body text
    content = Field()
    # Abstract (first 100 characters of the body)
    abstract = Field()
    # Article heat (number of comment participants)
    heat = Field()
    # ID
    id = Field()
    # URL
    url = Field()
    # List of comment dicts
    comments = Field()
The Item holds the individual parts of a crawled news article; it is handed to the pipeline, which formats it and writes it out to a file.
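
As a quick illustration of how the spider and the pipeline share data, an Item is filled and read like a dictionary; a small sketch (the import path is an assumption based on the default project layout, scrapyspider/items.py):

# The import path assumes the project package name used in the settings below.
from scrapyspider.items import newsItem

item = newsItem()
item['title'] = 'Example title'
item['url'] = 'http://news.163.com/example.html'
# dict(item) is exactly what the pipeline serializes with json.dumps().
print(dict(item))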

Pipeline

import json
import codecs
class ScrapyspiderPipeline(object):
    def __init__(self):
        self.file = codecs.open('xxxx.json', 'w', encoding='utf-8')
    def process_item(self, item, spider):
        # Serialize each item as one JSON line
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item
    def close_spider(self, spider):
        # Called automatically by Scrapy when the spider finishes
        self.file.close()

The pipeline writes the data stored in the Items out to a file, as JSON or CSV. Note that no pipeline is enabled by default, so nothing is written until it is registered in the settings, as shown below.
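
A sketch of the settings entry that enables the pipeline, assuming the class above lives in scrapyspider/pipelines.py:

# settings.py: register the pipeline; the number (0-1000) is its execution order.
ITEM_PIPELINES = {
    'scrapyspider.pipelines.ScrapyspiderPipeline': 300,
}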

settings

BOT_NAME = 'scrapyspider'

SPIDER_MODULES = ['scrapyspider.spiders']
NEWSPIDER_MODULE = 'scrapyspider.spiders'
# Maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 128
COOKIES_ENABLED = False
FEED_EXPORT_ENCODING = 'utf-8'
DOWNLOAD_DELAY = 0.01
CLOSESPIDER_ITEMCOUNT = 50100 
DOWNLOAD_TIMEOUT = 10 
COOKIES_ENABLED = False
Disable cookies to reduce the chance of being banned.
DOWNLOAD_DELAY = 0.01
Download delay, to ease the load on the server and reduce the chance of being banned.
CLOSESPIDER_ITEMCOUNT = 50100
Number of scraped items after which the spider is closed: once this many items have been collected, the crawler stops following links, writes the output file, and exits.
DOWNLOAD_TIMEOUT = 10

Timeout for downloading a single page; pages that exceed it are dropped.

Launch

import time
import datetime
from scrapy import cmdline

def runnews(h, m):
    '''h is the target hour, m the target minute'''
    while True:
        # Wait until the configured time is reached, e.g. 0:00
        while True:
            now = datetime.datetime.now()
            if (now.hour == h and now.minute >= m) or (now.hour > h):
                break
            # Check again every 10 seconds
            time.sleep(10)

        cmdline.execute("scrapy crawl 163news -o xxxx.csv".split())

runnews(8, 38)
Scheduled-crawling module: the script waits until the configured time each day and then launches the crawl.
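
One caveat: scrapy.cmdline.execute does not return once the crawl finishes, so the outer while True loop above effectively fires only once. A sketch of an alternative that launches Scrapy in a child process, so the daily loop really repeats (a cron job or the system task scheduler would work just as well):

import datetime
import subprocess
import time

def run_daily(h, m):
    '''Start the crawl once a day after h:m, running Scrapy as a child process.'''
    while True:
        now = datetime.datetime.now()
        if now.hour == h and now.minute >= m:
            subprocess.run("scrapy crawl 163news -o xxxx.csv".split(), check=False)
            # Sleep past the trigger window so the crawl is not started twice in one day.
            time.sleep(3600)
        time.sleep(10)

run_daily(8, 38)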

Project Files


V. Summary

This was the first medium-sized project I wrote in Python. There is nothing technically sophisticated about it, and because it was my first I ran into plenty of problems, some with Python itself, some with networking, and so on; all in all it was a decent piece of practice.