[Python crawler notes] Part 1: Start by scraping a small page

Preface: I've been meaning to scrape something for a while. I recently started learning to crawl pages with Python, so let's start with my own blog pages.

Basics:

Python 3.6
Modules: BeautifulSoup (you have to install it with pip install beautifulsoup4, which tripped me up), random, and xlwt (for writing to Excel)

Goal:

The plan is to grab every article's title, description, creation time, read count, and comment count.

Getting started

from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
import random
import xlwt
import re

# Pick a random User-Agent so we don't get blocked. Not that it really matters here...
def getAgent():
    agents = [
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
        "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
        "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
    ]
    return random.choice(agents)

Requesting the data

def requestCSDN():
    print('requesting data')
    url = "http://blog.csdn.net/qq_24142325"
    values = {
        'data': 'viewmode=list'  # any query parameters can go here
    }
    result = getHtml(url, values)
    return result

def getHtml(url, values):
    headers = {'User-Agent': getAgent()}
    data = urllib.parse.urlencode(values)
    # Build the full URL and attach the headers via a Request object;
    # passing the bare URL string to urlopen would silently drop the User-Agent.
    request = urllib.request.Request(url + '?' + data, headers=headers)
    response_result = urllib.request.urlopen(request).read()
    html = response_result.decode('utf-8')
    return html
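
While developing, it helps to dump the raw HTML to a file and look at it before writing any parsing code (the complete code at the end has a small saveResult helper for this). A minimal sketch, where 'index.html' is just an example output path:

# Quick check: save the raw page so it can be inspected in a browser or editor.
html = requestCSDN()
with open('index.html', 'w', encoding='utf-8') as f:  # 'index.html' is only an illustrative path
    f.write(html)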

Processing the page

First, here is the data we need; this is what each block in articlesHtml (used below) looks like:

<div class="list_item article_item">
    <div class="article_title">
        <span class="ico ico_type_Original"></span>

        <h1>
            <span class="link_title">
            <a href="/qq_24142325/article/details/75008179">
                【讀書筆記】模擬對話,理解TCP的三次握手與四次揮手
            </a>
            </span>

        </h1>
    </div>

    <div class="article_description">
        前言:看到一篇博文圖解 TCP 三次握手與四次分手,記錄自己的理解。
        三次握手:一:client:喂喂喂?server你聽的到嗎?
          第一次握手:# 測試自己的發信能力和對方的收信能力
        二:server:恩恩,我聽的到你,你聽的到我嗎?
          第二次握手:#證明對方的發信能力和自己的收信能力,測試自己的發信能力和對方的收信能力
        三:client:我也能聽到你啊,那我們就開始對話吧!
          第三次握手:...
    </div>
    <div class="article_manage">
        <span class="link_postdate">2017-07-12 12:04</span>
        <span class="link_view" title="閱讀次數"><a href="/qq_24142325/article/details/75008179" title="閱讀次數">閱讀</a>(18)</span>
        <span class="link_comments" title="評論次數"><a href="/qq_24142325/article/details/75008179#comments" title="評論次數" onclick="_gaq.push(['_trackEvent','function', 'onclick', 'blog_articles_pinglun'])">評論</a>(0)</span>

    </div>

    <div class="clear"></div>
</div>
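
Before wiring everything together, here is a minimal, self-contained sketch that checks the BeautifulSoup lookups against a single article block. It uses a simplified stand-in for the snippet above, with made-up title and description text:

from bs4 import BeautifulSoup

# A simplified stand-in for one "list_item article_item" block from the page above.
snippet_html = '''
<div class="list_item article_item">
  <div class="article_title"><h1><span class="link_title">
    <a href="/qq_24142325/article/details/75008179">Sample title</a>
  </span></h1></div>
  <div class="article_description">Sample description</div>
  <div class="article_manage">
    <span class="link_postdate">2017-07-12 12:04</span>
    <span class="link_view" title="閱讀次數"><a href="#">閱讀</a>(18)</span>
    <span class="link_comments" title="評論次數"><a href="#">評論</a>(0)</span>
  </div>
</div>
'''

soup = BeautifulSoup(snippet_html, 'html.parser')
item = soup.find(class_='list_item')
print(item.find(class_='link_title').a.get_text(strip=True))  # the article title
print(item.find(class_='link_postdate').string)               # 2017-07-12 12:04
print(item.find(class_='link_view').get_text(strip=True))     # something like "閱讀(18)"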

The main part:

# Purpose: grab the parts we need from the page data, then write them to Excel
def delResult(result):
    soup = BeautifulSoup(result, 'html.parser')
    title = soup.find(id='blog_title').h2.a.string  # find returns only the first match
    articles = []
    articlesHtml = soup.find_all(class_="list_item article_item")  # the blocks shown above
    for art in articlesHtml:
        article = {'name':'', 'des':'', 'time':'', 'read':'', 'note':''}  # store each article in a dict
        article['name'] = trimSpaceAndLineBread(art.find(class_="article_title").h1.span.a.string)  # strip spaces and line breaks
        article['des'] = trimSpaceAndLineBread(art.find(class_="article_description").string)
        article['time'] = trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_postdate").string)
        article['read'] = txt_wrap_by('(', ')', trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_view").text))  # .text looks like "閱讀(18)"; we only want the "18" inside the parentheses
        article['note'] = txt_wrap_by('(', ')', trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_comments").text))
        articles.append(article)  # collect into articles
    result = {'title':title, 'articles':articles}
    saveExl('index.xls', result)  # save to an Excel file
# Strip spaces and line breaks; check for None first, otherwise a missing value would raise an error
def trimSpaceAndLineBread(s):
    if s is not None:
        s = s.replace('\r', '').replace('\n', '').replace(' ', '')
    return s
# Return the text between two markers, e.g. the "18" inside the parentheses of "閱讀(18)"
def txt_wrap_by(start_str, end, html):
    start = html.find(start_str)
    if start >= 0:
        start += len(start_str)
        end = html.find(end, start)
        if end >= 0:
            return html[start:end].strip()
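
# A quick sanity check of txt_wrap_by on the fullwidth parentheses CSDN uses
# (these sample strings are made up for illustration):
# txt_wrap_by('(', ')', '閱讀(18)')  # -> '18'
# txt_wrap_by('(', ')', '評論(0)')   # -> '0'
# txt_wrap_by('(', ')', '閱讀18')     # -> None, since there are no parentheses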
# Write the results to Excel
def saveExl(path, result):
    try:
        workbook = xlwt.Workbook()  # the W in Workbook must be uppercase
        sheet1 = workbook.add_sheet(result['title'], cell_overwrite_ok=True)
        c = 0
        sheet1.write(c, 0, 'Title')
        sheet1.write(c, 1, 'Description')
        sheet1.write(c, 2, 'Created')
        sheet1.write(c, 3, 'Reads')
        sheet1.write(c, 4, 'Comments')
        for f in result['articles']:  # write one row per article
            c = c + 1
            sheet1.write(c, 0, f['name'])
            sheet1.write(c, 1, f['des'])
            sheet1.write(c, 2, f['time'])
            sheet1.write(c, 3, f['read'])
            sheet1.write(c, 4, f['note'])
            print('writing row ' + str(c))
        workbook.save(path)
        print('write succeeded')
    except IOError:
        print('write failed')
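
For reference, the dict that delResult hands to saveExl has this shape (the values below are placeholders, not real data):

# Shape of the data saveExl expects; the values are illustrative only.
sample = {
    'title': 'my blog',
    'articles': [
        {'name': 'first post', 'des': 'a short description',
         'time': '2017-07-12 12:04', 'read': '18', 'note': '0'},
    ],
}
saveExl('index.xls', sample)  # writes a header row plus one data row to index.xls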

Calling it

delResult(requestCSDN())

Result

(Screenshot of the resulting Excel file)

Finally

That's about as simple as a crawler gets. I'm just getting started, and there's still plenty to optimize, so one step at a time.
Here is my complete code:

from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
import random
import xlwt
import re


def getAgent():
    agents = [
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
        "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
        "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
    ]
    return random.choice(agents)

def getHtml(url, values):
    headers = {'User-Agent': getAgent()}
    data = urllib.parse.urlencode(values)
    request = urllib.request.Request(url + '?' + data, headers=headers)
    response_result = urllib.request.urlopen(request).read()
    html = response_result.decode('utf-8')
    return html

def requestCSDN():
    print('requesting data')
    url = "http://blog.csdn.net/qq_24142325"
    values = {
        'data': 'viewmode=list'
    }
    result = getHtml(url, values)
    return result

def saveResult(path, result):
    try:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(result)
    except IOError:
        print('write failed')

def saveExl(path, result):
    try:
        workbook = xlwt.Workbook()
        sheet1 = workbook.add_sheet(result['title'], cell_overwrite_ok=True)
        c = 0
        sheet1.write(c, 0, 'Title')
        sheet1.write(c, 1, 'Description')
        sheet1.write(c, 2, 'Created')
        sheet1.write(c, 3, 'Reads')
        sheet1.write(c, 4, 'Comments')
        for f in result['articles']:
            c = c + 1
            sheet1.write(c, 0, f['name'])
            sheet1.write(c, 1, f['des'])
            sheet1.write(c, 2, f['time'])
            sheet1.write(c, 3, f['read'])
            sheet1.write(c, 4, f['note'])
            print('writing row ' + str(c))
        workbook.save(path)
        print('write succeeded')
    except IOError:
        print('write failed')

def delResult(result):
    soup = BeautifulSoup(result, 'html.parser')
    title = soup.find(id='blog_title').h2.a.string
    articles = []
    articlesHtml = soup.find_all(class_="list_item article_item")
    for art in articlesHtml:
        article = {'name':'', 'des':'', 'time':'', 'read':'', 'note':''}
        article['name'] = trimSpaceAndLineBread(art.find(class_="article_title").h1.span.a.string)
        article['des'] = trimSpaceAndLineBread(art.find(class_="article_description").string)
        article['time'] = trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_postdate").string)
        article['read'] = txt_wrap_by('(', ')', trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_view").text))
        article['note'] = txt_wrap_by('(', ')', trimSpaceAndLineBread(art.find(class_="article_manage").find(class_="link_comments").text))
        articles.append(article)
    result = {'title':title, 'articles':articles}
    saveExl('index.xls', result)

def trimSpaceAndLineBread(s):
    if s is not None:
        s = s.replace('\r', '').replace('\n', '').replace(' ', '')
    return s

def txt_wrap_by(start_str, end, html):
    start = html.find(start_str)
    if start >= 0:
        start += len(start_str)
        end = html.find(end, start)
        if end >= 0:
            return html[start:end].strip()

def main():
    delResult(requestCSDN())

if __name__ == '__main__':
    main()

One last thing

Sigh, if I'd known the comment counts were all 0 I wouldn't have bothered scraping them. A bit heartbreaking.
