An audiobook crawler written in Python
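
A two-part crawler for the audiobook site www.ting89.com: querybook.py walks the site's category listings and, for each book it finds, hands the title to splider.py, which searches for the book by name and downloads every chapter as an MP3 file.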

querybook.py

from bs4 import BeautifulSoup  # the lxml package must be installed for the "lxml" parser below
import requests

import splider
class QuName:
    def __init__(self, number):
        self.number = number
    def getPageNum(self, url):
        f = requests.get(url)  # fetch the page so its HTML can be parsed
        soup = BeautifulSoup(f.content, "lxml")
        try:
            # the span text is assumed to look like "第1/25頁";
            # [3:5] grabs the two-digit total page count
            pageNum = soup.find('div', class_="pagesnums").find('span').text
            print('getPageNum succeeded')
            return int(pageNum[3:5])
        except Exception:
            print('getPageNum failed')
            return 0  # 0 pages lets the caller skip pagination instead of crashing on None
        finally:
            print('___________________________')
    def getBookList(self):
        # page 1 of a category is booklist/<cat>.html;
        # later pages are booklist/<cat>_<page>.html
        for num in range(1, self.number + 1):
            baseUrl = 'http://www.ting89.com/booklist/' + str(num) + '.html'
            pageNum = self.getPageNum(baseUrl)
            self.getBookInfo(baseUrl)
            print(baseUrl)
            for num1 in range(2, pageNum + 1):
                pageUrl = 'http://www.ting89.com/booklist/' + str(num) + '_' + str(num1) + '.html'
                self.getBookInfo(pageUrl)
                print(pageUrl)

    def getBookInfo(self, url):
        f = requests.get(url)  # fetch the listing page
        soup = BeautifulSoup(f.content, "lxml")
        try:
            bookList = soup.find('div', class_="clist").findAll('li')
            for i in bookList:
                imgUrl = i.find('img')
                print('Cover image:', imgUrl['src'])
                # print('Title:', i.find('b').text)
                pList = i.findAll('p')
                for j in pList:
                    print(j.text)
                # download the audio files for this book
                splider.YsSpider(i.find('b').text).download_files()
        except Exception:
            print('getBookInfo failed')
        finally:
            print('___________________________')

qn = QuName(13)         # the number of categories on the site (hard-coded for convenience)
qn.getBookList()
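
A note on the pageNum[3:5] slice in getPageNum: it assumes the pagination span always reads like "第1/25頁", with a two-digit total at a fixed offset. If the page count ever has one or three digits, the slice breaks; pulling the digits after the slash with a regex is more forgiving. A minimal sketch (the span text is an assumed example, not verified against the live markup):

import re

def parse_page_count(span_text):
    # grab the digits after the slash, e.g. "第1/25頁" -> 25
    m = re.search(r'/(\d+)', span_text)
    return int(m.group(1)) if m else 0

print(parse_page_count('第1/25頁'))  # -> 25 (illustrative input)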

splider.py

import requests
import urllib.parse
import re
import os
import time
class YsSpider:
    def __init__(self, name):
        self.search_name = name
        self.search_url = "http://www.ting89.com/search.asp?searchword="
        self.home_url = "http://www.ting89.com/books/"
        self.index_pattern = r"""<a href="/books/([0-9]+).html" title="(.+?)" target='_blank'>"""
        self.chapter_pattern=r"""<a href='(/down/\?[^-]+-\d+.html)' target="_blank">(.+?)</a>"""
        self.down_pattern=r"""url=(.*)/(.+?)\.mp3"""
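        # What the three patterns match (read off the regexes themselves):
        #   index_pattern:   a search-result link, capturing the book id and title
        #   chapter_pattern: a chapter link on the book page, capturing its
        #                    /down/?... URL and the chapter title
        #   down_pattern:    the real MP3 location on a chapter's download page,
        #                    capturing the base URL and the first file name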
        self.book_id = ''
        self.book_name = ''
        self.Chapter_list = []

    # search the site and return the id of the chosen book
    def searchbook(self):
        file = requests.get(self.search_url + urllib.parse.quote(self.search_name, encoding='gb2312'))
        data = file.content.decode('gbk')
        result = re.findall(self.index_pattern, data)
        if len(result):
            for index, i in enumerate(result):
                print('%d.%s' % (index + 1, i[1]))
            # choice = input("Enter the number of the book to download: ")
            choice = '1'  # take the first match; re-enable input() to choose interactively
            self.book_name = result[int(choice) - 1][1]
            self.book_id = result[int(choice) - 1][0]
            return self.book_id
        else:
            print('******* No books matched your search; change the name and rerun *******')
            exit()

    def get_chapter_list(self):  # fetch (chapter URL, chapter title) pairs
        data = requests.get(self.home_url + self.searchbook() + '.html').content.decode('gbk')
        result = re.findall(self.chapter_pattern, data)
        return result
    def _getAllUrl(self):  # build the download URL of every chapter
        chapter_list = self.get_chapter_list()
        chapter = [x[0] for x in chapter_list]
        self.Chapter_list = [x[1] for x in chapter_list]
        _list = [x[1] for x in chapter_list]
        # the first chapter's download page reveals the MP3 host and naming scheme
        data = requests.get("http://www.ting89.com" + chapter[0]).content.decode('gbk')
        result = re.findall(self.down_pattern, data)
        # result[0] is (base URL, first file name); a name matching ^0.*1$
        # (e.g. "001") means the files are numbered with zero padding
        return self.sub_get_url(result[0][0], _list, re.search("^0.*1$", result[0][1]))

    def sub_get_url(self, down_url, _list, down_url_flag):
        url = []
        if down_url_flag:
            # padded-number scheme: 01.mp3, 02.mp3, ...
            width = len(str(len(_list) - 1))  # digits in the highest zero-based index
            for i in range(len(_list)):
                tmp_url = down_url + '/' + str(i + 1).zfill(width) + '.mp3'
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        else:
            # title scheme: the chapter title itself is the file name
            for item in _list:
                tmp_url = down_url + '/' + item + ".mp3"
                url.append(urllib.parse.quote(tmp_url, safe='/:?='))
        return url
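
    # Illustrative shapes of the URLs sub_get_url returns (made-up host and
    # path, not taken from the site):
    #   padded mode: http://example-host/book/01.mp3, .../02.mp3, ...
    #   title mode:  http://example-host/book/<chapter title>.mp3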

    # save one URL to disk, skipping files that already exist
    def save_a_file(self, url, path, chapter):
        try:
            print('Trying to download', chapter)
            if not os.path.exists(path):
                response = requests.get(url)
                with open(path, 'wb') as f:
                    f.write(response.content)
                print(chapter, 'saved')
                response.close()
                time.sleep(1)  # be polite to the server
            else:
                print('File already exists')
        except Exception:
            print('Download of', chapter, 'failed; retrying')
            self.save_a_file(url, path, chapter)  # naive retry: repeated failures recurse

    def download_files(self):
        result = self._getAllUrl()  # download URL of every chapter
        root = os.path.join(os.getcwd(), self.book_name)
        if not os.path.exists(root):
            os.mkdir(root)
        for index, i in enumerate(result):
            path = os.path.join(root, self.Chapter_list[index]) + '.mp3'
            self.save_a_file(i, path, self.Chapter_list[index])
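
To run splider.py on its own, a minimal entry point like the one below works; the book title is a placeholder, and what it matches depends on the site's search index:

if __name__ == '__main__':
    # standalone usage sketch: search for a title and download every chapter
    YsSpider('書名').download_files()  # '書名' is a placeholder book title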