CSDN博客遷移


@author: enjoyhot
代碼下載:github

一、綜述

最開始的博客用的是oschina, 自我感覺小清新,支持比較多風格的編輯器,容易編輯程度比csdn的要好。後來因爲csdn人氣的關係,又轉到csdn,不過csdn編輯確實比較虐心,再後來就在服務器上搭一個WordPress的博客,不過用起來不是很爽。幾個月前支持Markdown編輯,這無疑給用github pages的用戶一個福音,因爲可以直接上傳到自己的github博客上。

用github pages生成靜態的頁面,目前比較流行有Jekyll,Octopress,Hexo等,其他的我沒怎麼調查,jekyll是github推薦的,上面有他們給的官方框架、主題,支持網頁書寫Markdown文章。後兩者需要編譯後再上傳,github上呈現出來的是html文件。總體而言,後兩者的界面更好,用起來更加舒適。我用的是Hexo,命令簡潔,不過要注意備份整個工程,不然後果不堪設想。

二、遷移CSDN原文檔到github Pages

1、前文

雖然目前CSDN支持markdown,但以前的文章都是用xeditor編輯器寫的,不能導出,所以調研了方法。
總體而言有三種方法,但好像沒多少人採用,因爲格式可能不好。
1.爬取頁面,導出html,然後在放在hexo中,目錄爲/source/_post/,直接放html文件,然後設置layout:false,那麼hexo會忽略對html的編譯,在瀏覽時直接超鏈接到html文件
2.將html文件再用程序轉換爲markdown
3.直接用代碼爬取頁面然後生成markdown文件

第1種方法可能會遇到html文件中格式不支持的情況,沒得到解決;
第2種方法發現在線轉換效果也不好,就尋求代碼解決,github上有一段程序,作者說可行,不過我環境沒搭成功,不懂php,更何況要裝curl(這個之前做android時NDK開發時也很難配置),後來用第3種方法就直接寫python爬蟲程序,參考github的一段程序,不過程序有些問題,也有些不符合如今CSDN的佈局,所以我大改了一下,轉爲markdown的那一部分程序脈絡是差不多的,這個也是最關鍵的部分,直接影響到markdown的顯示,不過我也做得不太好。

2、程序

所需安裝庫:
BeautifulSoup
根據版本不同可能要改動相應的代碼,一般不用改。

#! /usr/bin/env python
#coding=utf-8
import urllib2
from bs4 import BeautifulSoup
import logging
import re
import threading
import traceback
import time 
import datetime

import sys
# reload() restores setdefaultencoding, which CPython removes from the sys
# namespace during startup; the default is set to gb18030 so that implicit
# str/unicode conversions of Chinese text do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('gb18030')


# global variable
blog = "http://blog.csdn.net"                                  # base URL prefixed to relative article links
url = "http://blog.csdn.net/gugugujiawei?viewmode=contents"    # the blog's "contents" page listing every article
outputDir = 'F:\\linux\\Share\\github\\article\\'              # Windows output directory (must end with a backslash)
gRetryCount = 4                                                # max download attempts per article


def decodeHtmlSpecialCharacter(htmlStr):
    """Decode the common HTML entities left over after tag stripping.

    Returns htmlStr with every known entity replaced by its character.
    """
    # NOTE(review): the published listing showed three bare-space keys here,
    # almost certainly "&nbsp;" mangled by the blog renderer -- with a literal
    # " " key every space in the article would have been deleted.
    # Also fixed: "&gt" lacked its semicolon (it turned "&gt;" into ">;"),
    # and "&copy;" was mapped to the registered-trademark sign instead of the
    # copyright sign.
    specChars = {"&nbsp;" : "", \
                 "&lt;" : "<", \
                 "&gt;" : ">", \
                 "&amp;" : "&", \
                 "&quot;" : "\"", \
                 "&copy;" : "©", \
                 "&times;" : "×", \
                 "&divide;" : "÷", \
                 }
    for key in specChars.keys():
        htmlStr = htmlStr.replace(key, specChars[key])
    return htmlStr

def repalceInvalidCharInFilename(filename):
    """Strip or substitute characters that Windows forbids in file names.

    Most forbidden characters are simply dropped; angle brackets become
    Chinese words and '|' / '&' become ' and ' / ' or '.
    """
    # Ordered pair list: replacement values never contain another key,
    # so the substitution order cannot cascade.
    replacements = (
        ("\\", ""),
        ("/", ""),
        (":", ""),
        ("*", ""),
        ("?", ""),
        ("\"", ""),
        ("<", "小於"),
        (">", "大於"),
        ("|", " and "),
        ("&", " or "),
    )
    for bad, good in replacements:
        filename = filename.replace(bad, good)
    return filename


def getPageUrlList(url):

    global blog

    #獲取所有的頁面的 url
    user_agent = 'Mozilla/4.0 (compatible;  MSIE 5.5; Windows NT)'  
    header = { 'User-Agent' : user_agent } 

    request = urllib2.Request(url, None, header)
    response = urllib2.urlopen(request)
    data = response.read()

    #print data
    soup = BeautifulSoup(data)
    pageListDocs = soup.find_all(id="article_list")

    # artclie----{url:title}
    articleUrlTitle = {}

    #print len(pageListDocs)

    for pageList in pageListDocs:

        h1List = pageList.find_all('h1')

        for articleList in h1List:            

            hrefDocs = articleList.find_all("a")


            if len(hrefDocs) > 0:
                articleHrefDoc = hrefDocs[0]                            

                #print "hello",articleHrefDoc
                articleUrl =  blog + articleHrefDoc["href"]
                articleTitle = articleHrefDoc.text
                articleUrlTitle[articleUrl] = articleTitle

    print 'the count of articles is',len(articleUrlTitle)
    '''
    for s in articleUrlTitle:
        print s,'--',articleUrlTitle[s]
    '''
    return articleUrlTitle


def download(url, title):
    # 下載文章,並保存爲 markdown 格式
    logging.info(" >> download: " + url)
    print 'downloading the article',title

    data = None
    title = '"' + title + '"'
    categories = ""
    content = ""
    #postDate = datetime.datetime.now()

    global gRetryCount
    count = 0
    while True:
        if count >= gRetryCount:
            break
        count = count + 1
        try:            
            time.sleep(2.0) #訪問太快會不響應
            user_agent = 'Mozilla/4.0 (compatible;  MSIE 5.5; Windows NT)'  
            header = { 'User-Agent' : user_agent }    
            request = urllib2.Request(url, None, header)
            response = urllib2.urlopen(request)            
            data = response.read()
            break
        except Exception,e:
            exstr = traceback.format_exc()
            logging.info(" >> failed to download " + url + ", retry: " + str(count) + ", error:" + exstr)
            pass

    if data == None:
        logging.info(" >> failed to download " + url)
        return

    #print data

    soup = BeautifulSoup(data)

    #date=link_postdate


    manageDocs = soup.find_all("div", "article_manage")
    for managerDoc in manageDocs:
        categoryDoc = managerDoc.find_all("span", "link_categories")
        if len(categoryDoc) > 0:                            
            categories = categoryDoc[0].a.get_text().encode('UTF-8').strip()
            categories  = categories.decode('utf-8').encode('gb2312')            

        postDateDoc = managerDoc.find_all("span", "link_postdate")
        if len(postDateDoc) > 0:
            postDateStr = postDateDoc[0].string.encode('UTF-8').strip()
            postDate = datetime.datetime.strptime(postDateStr, '%Y-%m-%d %H:%M')
            print 'date',postDate

    contentDocs = soup.find_all(id="article_content")
    for contentDoc in contentDocs:
        htmlContent = contentDoc.prettify().encode('UTF-8')
        #print htmlContent
        #file = open('F:\\linux\\Share\\github\\out2.txt','a+')
        #file.write(htmlContent)        
        content = htmlContent2String(htmlContent)

    exportToMarkdown(outputDir, postDate, categories, title, content)

# htmlContent2String 是整個程序的關鍵,用於將html轉換爲markdown格式
def htmlContent2String(contentStr):

    # 因爲格式中可能會有點亂,換行符亂入,所以用[\s\S]匹配任何字符,包括換行符,注意其中的?是爲了去除貪婪匹配
    # <img src="https://img-blog.csdn.net/20150118194525562" align="middle" width="400 height="300" alt=""> 
    # 圖片鏈接   
    patternImg = re.compile(r'(<img[\s\S]+?src=")([\s\S]+?)("[\s\S]+?>)')    

    # <a target="_blank" href="http://blog.csdn.net/gugugujiawei/article/details/42558411">博文</a>
    # 文字鏈接
    patternHref = re.compile(r'(<a[\s\S]+?href=")([\s\S]*?)("[\s\S]*?>)([\s\S]+?)(</a>)')

    # 去除html各種標籤,這裏的?則是指匹配0次或1次
    patternRemoveHtml = re.compile(r'</?[^>]+>')

    resultContent = patternImg.sub(r'![image_mark](\2)', contentStr)
    resultContent = patternHref.sub(r'[\4](\2)', resultContent)
    resultContent = re.sub(patternRemoveHtml, r'', resultContent)
    resultContent = decodeHtmlSpecialCharacter(resultContent)

    #file = open('F:\\linux\\Share\\github\\out3.txt','a+')
    #file.write(resultContent)

    return resultContent




def exportToMarkdown(exportDir, postdate, categories, title, content):
    """Write one article out as a markdown-style text file with front matter.

    exportDir  -- target directory (must end with a path separator)
    postdate   -- datetime of the original post
    categories -- category string (reused as the tag list)
    title      -- article title, also used as the file name
    content    -- converted markdown body (utf-8 encoded str)
    """
    contentDate = postdate.strftime('%Y-%m-%d %H:%M:%S %z')
    filename = repalceInvalidCharInFilename(title)
    filepath = exportDir + filename + '.txt'

    # 'a+' keeps the historical append behaviour (re-running the crawler
    # appends a second copy to an existing file). "with" guarantees the
    # handle is closed even when a write raises -- the original leaked the
    # handle on exceptions.
    with open(filepath, 'a+') as newFile:
        # Keep or drop the commented header lines as needed; categories and
        # tags intentionally share the same value.

        # newFile.write('---' + '\n')
        # newFile.write('layout: post' + '\n')
        newFile.write('title: ' + title + '\n')
        newFile.write('date: ' + contentDate + '\n')
        # newFile.write('comments: true' + '\n')
        newFile.write('categories: [' + categories + ']' + '\n')
        newFile.write('tags: [' + categories + ']' + '\n')

        #newFile.write('description:' + title + '\n')
        # newFile.write('keywords: ' + categories + '\n')

        newFile.write('---' + '\n\n')

        # Output is written in gb18030 so Windows editors display it correctly.
        content = content.decode('utf-8').encode('gb18030')
        newFile.write(content)
        newFile.write('\n')



if __name__ == "__main__":

    global url    
    articleUrlTitle = getPageUrlList(url)

    '''
    for s in articleUrlTitle:
        print s,'--',articleUrlTitle[s]
    '''

    #multithread download
    threads = []
    for url in articleUrlTitle:
        patternTitle = re.compile('\r\n *(.+) *\r\n')
        title = patternTitle.sub(r'\1',articleUrlTitle[url])
        # print 'title',title
        t = threading.Thread(target = download,args = (url,title))    
        t.start()
        threads.append(t)


    for i in threads:
        i.join()

    print "success"

注意一下,我是直接在windows下運行生成的txt文件,所以文件是dos文件,當在linux下編輯的話會出現各種因格式問題導致的奇怪的問題,這需要自己改一下編碼。

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章