Crawler in Practice: Scraping 電影天堂 (Movie Heaven, ygdy8.net) with XPath

from lxml import etree
import requests
from urllib.parse import urljoin


HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}
BASE_DOMAIN = 'https://www.ygdy8.net/'


def get_detail_urls(url):
    # Fetch one list page and return the absolute URL of every movie detail page on it.
    response = requests.get(url, headers=HEADERS)
    # The site is served as GBK; ignore the few characters that fail to decode.
    text = response.content.decode(encoding='gbk', errors='ignore')
    html = etree.HTML(text)
    detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
    # urljoin avoids the double slash that plain string concatenation with BASE_DOMAIN produces.
    detail_urls = [urljoin(BASE_DOMAIN, detail_url) for detail_url in detail_urls]
    return detail_urls


def parse_detail_page(url):
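    # Parse a single movie detail page into a dict of the scraped fields.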
    movie = {}
    response = requests.get(url, headers=HEADERS)
    text = response.content.decode(encoding='gbk', errors='ignore')
    html = etree.HTML(text)
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    movie['title'] = title
    # The detail content (cover, credits, synopsis, download link) sits inside div#Zoom.
    zoomE = html.xpath("//div[@id='Zoom']")[0]
    # print(etree.tostring(zoomE, encoding='utf-8').decode("utf-8"))
    cover = zoomE.xpath(".//img/@src")
    movie['cover'] = cover

    def parse_info(info, rule):
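        # Strip the "◎..." label prefix and surrounding whitespace from one info line.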
        return info.replace(rule, "").strip()

    # ".//text()" keeps the query inside the Zoom div; a bare "//text()" would scan the whole document.
    infos = zoomE.xpath(".//text()")
    # print(infos)
    for index, info in enumerate(infos):
        # print(info)
        # print(index)
        # print("-"*30)
        if info.startswith("◎片  名"):
            info = parse_info(info, "◎片  名")
            movie['name'] = info
            print(info)
        elif info.startswith("◎產  地"):
            info = info.replace("◎產  地", "").strip()
            movie['country'] = info
        elif info.startswith("◎類  別"):
            info = info.replace("◎類  別", "").strip()
            movie['type'] = info
            print(info)
        elif info.startswith("◎豆瓣評分"):
            info = parse_info(info, "◎豆瓣評分")
            movie["douban"] = info
            print(info)
        elif info.startswith("◎主  演"):
            info = parse_info(info, "◎主  演")
            actors = [info]
            # print(info)
            # movie["zhuyan"] = info
            for x in range(index + 1, len(infos)):
                actor = infos[x].strip()
                if actor.startswith("◎"):
                    break
                actors.append(actor)
            movie['actors'] = actors
        elif info.startswith("◎簡  介"):
            info = parse_info(info, "◎簡  介")
            for x in range(index + 1, len(infos)):
                profile = infos[x].strip()
                # print(profile)
                if profile.startswith("【下載地址】"):
                    break
                print(profile)
                movie["jianjie"] = profile
    download_url = html.xpath("//td[@bgcolor='#fdfddf']/a/@href")
    print(download_url)
    movie["download_url"] = download_url
    return movie


def spider():
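    # Crawl list pages 1-7, parse every movie detail page found, and write the results to a text file.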
    base_url = "https://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html"
    movies = []
    for x in range(1, 8):
        url = base_url.format(x)
        detail_urls = get_detail_urls(url)
        for detail_url in detail_urls:
            movie = parse_detail_page(detail_url)
            movies.append(movie)
            print(movie)

    with open('D:/Users/24913/Desktop/dianying.txt', 'w', encoding='utf-8') as fp:
        num = 0
        for movie in movies:
            num = num + 1
            fp.write(str(num)+'\n')
            # Writing a fixed template such as movie['year'] or movie['director'] would
            # raise KeyError, because parse_detail_page does not produce those keys;
            # iterating over movie.items() writes whatever fields were actually scraped.
            for key, value in movie.items():
                fp.write(str(key) + ":" + str(value) + '\n')
    print("File saved successfully")


if __name__ == '__main__':
    spider()

The script above uses XPath to scrape movie-related information from the Movie Heaven (電影天堂, ygdy8.net) site.
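
To try the core XPath step without touching the network, here is a minimal, self-contained sketch that runs the same //table[@class='tbspan']//a/@href query used in get_detail_urls against a hand-written HTML fragment. The fragment and its links are invented purely for illustration:

from lxml import etree
from urllib.parse import urljoin

# A made-up fragment mimicking the structure of a Dytt list page.
SAMPLE_HTML = """
<table class="tbspan">
  <tr><td><a href="/html/gndy/dyzz/20240101/100001.html">Movie A</a></td></tr>
  <tr><td><a href="/html/gndy/dyzz/20240102/100002.html">Movie B</a></td></tr>
</table>
"""

html = etree.HTML(SAMPLE_HTML)
# Same query as get_detail_urls: every href inside a table with class "tbspan".
hrefs = html.xpath("//table[@class='tbspan']//a/@href")
print([urljoin('https://www.ygdy8.net/', h) for h in hrefs])
# ['https://www.ygdy8.net/html/gndy/dyzz/20240101/100001.html',
#  'https://www.ygdy8.net/html/gndy/dyzz/20240102/100002.html']

The same pattern drives parse_detail_page as well: grab a context element first (the div with id "Zoom"), then run relative ".//..." queries under it.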
