Crawler Summary

When I first started writing crawlers in Python I used libraries like bs4, requests, and urllib2; scraping a simple page with them could hardly be easier.

Something like this:

# -*- coding: utf-8 -*-
#---------------------------------------
#   Program: Baidu Tieba crawler
#   Version: 0.1
#   Author: why
#   Date: 2013-05-14
#   Language: Python 2.7
#   Usage: take a paginated thread URL, strip the trailing page number,
#          then set the start and end page numbers.
#   Function: download every page in the range and save each one as an HTML file.
#---------------------------------------
 
import string, urllib2
 
# Download the pages of a Tieba thread in the given range
def baidu_tieba(url,begin_page,end_page):
    for i in range(begin_page, end_page+1):
        sName = string.zfill(i,5) + '.html'  # zero-pad the page number into a five-digit filename
        print u'Downloading page ' + str(i) + u' and saving it as ' + sName + '......'
        f = open(sName,'w+')
        m = urllib2.urlopen(url + str(i)).read()
        f.write(m)
        f.close()
 
 
#-------- Enter the parameters here ------------------

print u"""#---------------------------------------
#   程序:百度貼吧爬蟲
#   版本:0.1
#   作者:why
#   日期:2013-05-14
#   語言:Python 2.7
#   操作:輸入帶分頁的地址,去掉最後面的數字,設置一下起始頁數和終點頁數。
#   功能:下載對應頁碼內的所有頁面並存儲爲html文件。
#---------------------------------------
"""

# This is the URL of a thread in the Shandong University Baidu Tieba forum
#bdurl = 'http://tieba.baidu.com/p/2296017831?pn='
#iPostBegin = 1
#iPostEnd = 10



print u'For example: http://tieba.baidu.com/p/2296017831?pn='
print u'Enter the Tieba thread URL with the number after pn= removed:'
bdurl = str(raw_input(u' '))
print u'Enter the start page number:'
begin_page = int(raw_input(u' '))
print u'Enter the end page number:'
end_page = int(raw_input(u' '))
#-------- Enter the parameters here ------------------
 

# Call the crawler
baidu_tieba(bdurl,begin_page,end_page)
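
For comparison, here is a minimal sketch of the same kind of simple page fetch done with requests and bs4 (Python 3 style here; the URL and the tags being pulled out are only placeholders, not anything from the original script):

# -*- coding: utf-8 -*-
# Minimal requests + BeautifulSoup sketch; the URL and the <a> tags
# extracted here are placeholders for illustration only.
import requests
from bs4 import BeautifulSoup

def fetch_link_texts(url):
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, 'html.parser')
    # Collect the visible text of every <a> tag on the page
    return [a.get_text(strip=True) for a in soup.find_all('a')]

if __name__ == '__main__':
    for text in fetch_link_texts('http://tieba.baidu.com/p/2296017831?pn=1'):
        print(text)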

The scripts I wrote at my previous job are gone now. The logic was: a Python crawler script fetches and parses web pages, extracts the useful fields, and writes them into a CSV file for the marketing team to use.
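
Since that code is lost, the following is only a rough sketch of the workflow, written under my own assumptions: the URL, the CSS selectors, and the CSV column names are all hypothetical (Python 3 style).

# -*- coding: utf-8 -*-
# Sketch of the "crawl -> parse -> extract fields -> write CSV" flow.
# The URL, selectors, and column names below are made-up placeholders.
import csv
import requests
from bs4 import BeautifulSoup

def crawl_to_csv(list_url, out_path):
    resp = requests.get(list_url, timeout=10)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, 'html.parser')

    rows = []
    for entry in soup.select('div.product'):          # hypothetical selector
        rows.append({
            'name': entry.select_one('h2').get_text(strip=True),
            'price': entry.select_one('.price').get_text(strip=True),
            'link': entry.select_one('a')['href'],
        })

    # Write the extracted fields into a CSV file for downstream use
    with open(out_path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=['name', 'price', 'link'])
        writer.writeheader()
        writer.writerows(rows)

if __name__ == '__main__':
    crawl_to_csv('http://example.com/products', 'products.csv')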

----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

The Scrapy crawler framework:

Installation:

See the separate post on installing the Scrapy crawler framework.

Getting started:

Create a new project:

scrapy startproject tutorial

Open the generated project directory in an IDE (I used PyCharm here); the layout Scrapy generates is sketched after the list below.

  • scrapy.cfg: the project's deploy configuration file
  • items.py: the project's items file; an Item is a container for the scraped data and works much like a Python dictionary
  • pipelines.py: the project's pipelines file
  • settings.py: the project's settings file
  • spiders: the directory that holds the spiders
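
For reference, the directory layout that scrapy startproject tutorial generates looks roughly like this (the exact set of files varies a little between Scrapy versions):

tutorial/
    scrapy.cfg            # deploy configuration file
    tutorial/             # the project's Python module
        __init__.py
        items.py
        pipelines.py
        settings.py
        spiders/
            __init__.py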

Code:

##items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class W3SchoolItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()


#################################
##pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
import codecs

class W3SchoolPipeline(object):

    def __init__(self):
        # Write items to a UTF-8 encoded JSON-lines file
        self.file = codecs.open('w3school_data_utf8.json','wb',encoding='utf-8')

    def process_item(self, item, spider):
        # Serialize each item as one JSON line and write it out
        line = json.dumps(dict(item)) + '\n'
        self.file.write(line.decode("unicode_escape"))
        return item


###############################################
##settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for w3school project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'w3school'

SPIDER_MODULES = ['w3school.spiders']
NEWSPIDER_MODULE = 'w3school.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'w3school (+http://www.yourdomain.com)'

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16

# Disable cookies (enabled by default)
#COOKIES_ENABLED=False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'w3school.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'w3school.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'w3school.pipelines.W3SchoolPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'


###############################################
##w3school_spider.py
from scrapy.selector import Selector
from scrapy import Spider

from w3school.items import W3SchoolItem

class W3schoolSpider(Spider):
    name = "w3school"
    allowed_domains = ["localhost"]
    start_urls = ["http://127.0.0.1:8080/myregister.html"]

    def parse(self, response):
        sel = Selector(response)
        sites = sel.xpath('//div[@id="navsecond"]/div[@id="course"]/ul[1]/li')
        items = []

        for site in sites:
            item = W3SchoolItem()
            title = site.xpath('a/text()').extract()
            link = site.xpath('a/@href').extract()
            desc = site.xpath('a/@title').extract()

            item['title'] = [t.encode('utf-8') for t in title]
            item['link'] = [l.encode('utf-8') for l in link]
            item['desc'] = [d.encode('utf-8') for d in desc]
            items.append(item)

            #log.msg("Appending item...",level='INFO')



        #log.msg("Append done.",level='INFO')
        return items

The spider relies on XPath for parsing, which feels a lot like working with XML; if you are not familiar with it, the W3School tutorials are a good place to pick it up.
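
To run the finished spider, use scrapy crawl w3school from the project directory. If you want to experiment with XPath expressions first, the same Selector class the spider's parse() uses can be fed a standalone HTML string; the fragment below is made up for illustration and is not the real w3school page:

# -*- coding: utf-8 -*-
# Try XPath expressions against a small, made-up HTML fragment using
# scrapy.selector.Selector (the same API the spider's parse() relies on).
from scrapy.selector import Selector

html = '''
<div id="navsecond">
  <div id="course">
    <ul>
      <li><a href="/html/" title="HTML tutorial">HTML</a></li>
      <li><a href="/css/" title="CSS tutorial">CSS</a></li>
    </ul>
  </div>
</div>
'''

sel = Selector(text=html)
for li in sel.xpath('//div[@id="navsecond"]/div[@id="course"]/ul[1]/li'):
    # Each <li> yields the link text, href, and title attribute
    print(li.xpath('a/text()').extract(),
          li.xpath('a/@href').extract(),
          li.xpath('a/@title').extract())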

References:

Zhihu: http://www.zhihu.com/question/20899988
Scrapy tutorial: http://www.cnblogs.com/txw1958/archive/2012/07/16/scrapy-tutorial.html
