Here comes a Python crawler: it scrapes listing data and exports it to Excel. Simple, clear, and effective; the full code is pasted below.
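The script relies on three third-party packages: requests for HTTP, BeautifulSoup (beautifulsoup4) for HTML parsing, and xlwt for writing .xls files. If they are not already installed, something like pip install requests beautifulsoup4 xlwt should take care of it.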

#!/usr/bin/python

import requests
import xlwt
from bs4 import BeautifulSoup


def getHouseList(url):
    house = []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    # Fetch the listing page
    res = requests.get(url, headers=headers)
    # Parse the response HTML
    soup = BeautifulSoup(res.content, 'html.parser')
    # Listing titles
    housename_divs = soup.find_all('div', class_='title')
    for housename_div in housename_divs:
        housename_as = housename_div.find_all('a')
        for housename_a in housename_as:
            housename = []
            # Title
            housename.append(housename_a.get_text())
            # Detail-page link
            housename.append(housename_a.get('href'))
            house.append(housename)
    houseinfo_divs = soup.find_all('div', class_='houseInfo')
    for i in range(len(houseinfo_divs)):
        info = houseinfo_divs[i].get_text()
        infos = info.split('|')
        # Community name
        house[i].append(infos[0])
        # Layout
        house[i].append(infos[1])
        # Floor area
        house[i].append(infos[2])
    # Total price
    house_prices = soup.find_all('div', class_='totalPrice')
    for i in range(len(house_prices)):
        # Price text
        price = house_prices[i].get_text()
        house[i].append(price)
    return house


# Scrape the detail page of a listing: district and total interior area
def houseinfo(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'}
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.content, 'html.parser')
    msg = []
    # District the listing is in
    areainfos = soup.find_all('span', class_='info')
    for areainfo in areainfos:
        # Only the text of the first <a> tag is needed
        area = areainfo.find('a')
        if not area:
            continue
        hrefStr = area['href']
        if hrefStr.startswith('javascript'):
            continue
        msg.append(area.get_text())
        break
    # Sum the per-room areas from the layout table to get the interior area
    infolist = soup.find_all('div', id='infoList')
    num = []
    for info in infolist:
        cols = info.find_all('div', class_='col')
        for i in cols:
            pingmi = i.get_text()
            try:
                # Strip the trailing two-character unit ("平米") before converting
                a = float(pingmi[:-2])
                num.append(a)
            except ValueError:
                continue
    msg.append(sum(num))
    return msg


# Write the collected listings to an Excel file
def writeExcel(excelPath, houses):
    workbook = xlwt.Workbook()
    # Add a worksheet
    sheet = workbook.add_sheet('git')
    # Header row, in the same order the fields were appended above
    row0 = ['Title', 'Link', 'Community', 'Layout', 'Area', 'Total price', 'District', 'Interior area']
    for i in range(0, len(row0)):
        sheet.write(0, i, row0[i])
    for i in range(0, len(houses)):
        house = houses[i]
        print(house)
        for j in range(0, len(house)):
            sheet.write(i + 1, j, house[j])
    workbook.save(excelPath)


# Main entry point: crawl the first four result pages and export them
def main():
    data = []
    for i in range(1, 5):
        print('Scraping page', i)
        # The first results page has no "pg<n>" segment in its URL
        if i == 1:
            url = 'https://sjz.lianjia.com/ershoufang/l2rs%E5%92%8C%E5%B9%B3%E4%B8%96%E5%AE%B6/'
        else:
            url = 'https://sjz.lianjia.com/ershoufang/pg' + str(i) + 'l2rs%E5%92%8C%E5%B9%B3%E4%B8%96%E5%AE%B6/'
        houses = getHouseList(url)
        for house in houses:
            link = house[1]
            if not link or not link.startswith('http'):
                continue
            mianji = houseinfo(link)
            # Append the district and interior area to the listing record
            house.extend(mianji)
        data.extend(houses)
        print('------------------- Page', i, 'done ------------------------')
    writeExcel('D:/house.xls', data)
    print('Crawl finished, Excel exported to: %s' % 'D:/house.xls')


if __name__ == '__main__':
    main()
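Once the script has run, the export can be sanity-checked by reading the file back. A minimal sketch, assuming pandas and xlrd are installed and the path matches the one used in writeExcel above:

import pandas as pd

# Read the exported .xls back and show the first few rows
df = pd.read_excel('D:/house.xls', sheet_name='git')
print(df.head())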
