爬取新浪微博用戶信息及微博內容並進行可視化分析

參考博文:https://blog.csdn.net/asher117/article/details/82793091
主要代碼如下

import re
import time
from collections import Counter

import jieba.posseg as psg
from bs4 import BeautifulSoup
from pyecharts import options as opts
from pyecharts.charts import Line
from pyecharts.charts import PictorialBar
from selenium import webdriver
from selenium.webdriver.common.by import By
from snownlp import SnowNLP

import Draw as draw
from opdata.opexcel import Operatingexcel
# browser = webdriver.Chrome()
# #給定登陸的網址
# url = 'https://passport.weibo.cn/signin/login'
# browser.get(url)
# time.sleep(3)
def login():
    print(u'登陸新浪微博手機端...')

    # 找到輸入用戶名的地方,並將用戶名裏面的內容清空,然後送入你的賬號
    username = browser.find_element_by_css_selector('#loginName')
    time.sleep(2)
    username.clear()
    username.send_keys('輸入自己的賬號')  # 輸入自己的賬號
    # 找到輸入密碼的地方,然後送入你的密碼
    password = browser.find_element_by_css_selector('#loginPassword')
    time.sleep(2)
    password.send_keys('輸入自己的密碼')
    # 點擊登錄
    browser.find_element_by_css_selector('#loginAction').click()
    ##這裏給個15秒非常重要,因爲在點擊登錄之後,新浪微博會有個九宮格驗證碼,下圖有,通過程序執行的話會有點麻煩(可以參考崔慶才的Python書裏面有解決方法),這裏就手動
    time.sleep(15)
    print('完成登陸!')
def get_info():
    dic_info={}
    id = 'dengchao'
    niCheng = id
    # 用戶的url結構爲 url = 'http://weibo.cn/' + id
    url = 'http://weibo.cn/' + id
    browser.get(url)
    time.sleep(3)
    # 使用BeautifulSoup解析網頁的HTML
    soup = BeautifulSoup(browser.page_source, 'lxml')
    # 爬取商戶的uid信息
    uid = soup.find('td', attrs={'valign': 'top'})
    uid = uid.a['href']
    uid = uid.split('/')[1]
    # 爬取最大頁碼數目
    pageSize = soup.find('div', attrs={'id': 'pagelist'})
    pageSize = pageSize.find('div').getText()
    pageSize = (pageSize.split('/')[1]).split('頁')[0]
    # 爬取微博數量
    divMessage = soup.find('div', attrs={'class': 'tip2'})
    weiBoCount = divMessage.find('span').getText()
    weiBoCount = (weiBoCount.split('[')[1]).replace(']', '')
    # 爬取關注數量和粉絲數量
    a = divMessage.find_all('a')[:2]
    guanZhuCount = (a[0].getText().split('[')[1]).replace(']', '')
    fenSiCount = (a[1].getText().split('[')[1]).replace(']', '')
    dic_info.setdefault("微博總數",weiBoCount)
    dic_info.setdefault("微博關注", guanZhuCount)
    dic_info.setdefault("微博粉絲", fenSiCount)
    contents=[]
    dianZans=[]
    zhuanFas=[]
    pinLuns=[]
    faBuTimes=[]
    yuanChuangs=[]
    # 通過循環來抓取每一頁數據
    # int(pageSize) + 1
    for i in range(1, 10):  # pageSize+1
        # 每一頁數據的url結構爲 url = 'http://weibo.cn/' + id + ‘?page=’ + i
        url = 'https://weibo.cn/dengchao?page=' + str(i)
        browser.get(url)
        time.sleep(1)
        # 使用BeautifulSoup解析網頁的HTML
        soup = BeautifulSoup(browser.page_source, 'lxml')
        body = soup.find('body')
        divss = body.find_all('div', attrs={'class': 'c'})[1:-2]
        for divs in divss:
            # yuanChuang : 0表示轉發,1表示原創
            yuanChuang = '1'  # 初始值爲原創,當非原創時,更改此值
            div = divs.find_all('div')
            # 這裏有三種情況,兩種爲原創,一種爲轉發

            if (len(div) == 2):  # 原創,有圖
                # 爬取微博內容
                content = div[0].find('span', attrs={'class': 'ctt'}).getText()
                aa = div[1].find_all('a')
                for a in aa:
                    text = a.getText()
                    if (('贊' in text) or ('轉發' in text) or ('評論' in text)):
                        # 爬取點贊數
                        if ('贊' in text):
                            dianZan = (text.split('[')[1]).replace(']', '')
                        # 爬取轉發數
                        elif ('轉發' in text):
                            zhuanFa = (text.split('[')[1]).replace(']', '')
                        # 爬取評論數目
                        elif ('評論' in text):
                            pinLun = (text.split('[')[1]).replace(']', '')
                            # 爬取微博來源和時間
                span = divs.find('span', attrs={'class': 'ct'}).getText()
                faBuTime = str(span.split('來自')[0])
                contents.append(content)
                dianZans.append(dianZan)
                zhuanFas.append(zhuanFa)
                pinLuns.append(pinLun)
                faBuTimes.append(faBuTime)
                yuanChuangs.append(yuanChuang)
            # 和上面一樣
            elif (len(div) == 1):  # 原創,無圖
                content = div[0].find('span', attrs={'class': 'ctt'}).getText()
                aa = div[0].find_all('a')
                for a in aa:
                    text = a.getText()
                    if (('贊' in text) or ('轉發' in text) or ('評論' in text)):
                        if ('贊' in text):
                            dianZan = (text.split('[')[1]).replace(']', '')
                        elif ('轉發' in text):
                            zhuanFa = (text.split('[')[1]).replace(']', '')
                        elif ('評論' in text):
                            pinLun = (text.split('[')[1]).replace(']', '')
                span = divs.find('span', attrs={'class': 'ct'}).getText()
                faBuTime = str(span.split('來自')[0])
                contents.append(content)
                dianZans.append(dianZan)
                zhuanFas.append(zhuanFa)
                pinLuns.append(pinLun)
                faBuTimes.append(faBuTime)
                yuanChuangs.append(yuanChuang)
            # 這裏爲轉發,其他和上面一樣
            elif (len(div) == 3):  # 轉發的微博
                yuanChuang = '0'
                content = div[0].find('span', attrs={'class': 'ctt'}).getText()
                aa = div[2].find_all('a')
                for a in aa:
                    text = a.getText()
                    if (('贊' in text) or ('轉發' in text) or ('評論' in text)):
                        if ('贊' in text):
                            dianZan = (text.split('[')[1]).replace(']', '')
                        elif ('轉發' in text):
                            zhuanFa = (text.split('[')[1]).replace(']', '')
                        elif ('評論' in text):
                            pinLun = (text.split('[')[1]).replace(']', '')
                span = divs.find('span', attrs={'class': 'ct'}).getText()
                faBuTime = str(span.split('來自')[0])
                contents.append(content)
                dianZans.append(dianZan)
                zhuanFas.append(zhuanFa)
                pinLuns.append(pinLun)
                faBuTimes.append(faBuTime)
                yuanChuangs.append(yuanChuang)
    dic_info.setdefault("內容", contents)
    dic_info.setdefault("點贊", dianZans)
    dic_info.setdefault("轉發", zhuanFas)
    dic_info.setdefault("評論", pinLuns)
    dic_info.setdefault("時間", faBuTimes)
    dic_info.setdefault("原創", yuanChuangs)
    time.sleep(2)
    # print(i)
    return dic_info
# 存入txt文件
def writetxt(jjrw, result):
    with open(jjrw, 'w+',encoding="utf-8") as r:
        for i in range(len(result)):
            if result[i] != "":
                s = str(result[i]).strip().replace("emoji", "").replace("span", "").replace("class", "").replace("#","").replace("http","")
                rec = re.compile("1f\d+\w*|[<>/=]|\r|\n|")
                s = rec.sub("", s)
                r.write(s+"\n")
def count(seg_list1):
    # 計數
    count = Counter(seg_list1)
    # 字典排序
    result = sorted(count.items(), key=lambda x: x[1], reverse=True)
    return result
# 讀取文件並進行分詞排序
def readjieba(text,excludes,list_replace):
    dic_result = {}
    seg_list1 = []
    nr=[]
    ns=[]
    # 分詞
    seg_list = psg.cut(text)
    for w, t in seg_list:
        # 去除停用詞
        if len(w) != 1 and t != 'm' and w not in excludes:
            # 替換詞
            for j in list_replace:
                if w == j[0]:
                    real_word == j[1]
                else:
                    real_word = w
            if t == 'nr':
                nr.append("{0}".format(real_word))
            if t=='ns':
                ns.append("{0}".format(real_word))
            seg_list1.append("{0}".format(real_word))

    dic_result.setdefault("全部", count(seg_list1))
    dic_result.setdefault("人名", count(nr))
    dic_result.setdefault("地名", count(ns))
    return dic_result
# 趨勢圖
def drawline(arrt,value,value1,value2,name):

    # 圖表初始化配置
    init_opts = opts.InitOpts(page_title=name)

    line = Line(init_opts=init_opts)
    # 標題配置
    title = opts.TitleOpts(title=name,
                           pos_left="10%")
    # 圖例配置
    legend_opts = opts.LegendOpts(orient="horizontal",
                                  pos_top="5%",
                                  pos_right="15%")

    # 工具箱配置
    # feature = opts.ToolBoxFeatureOpts(save_as_image=True, restore=True, data_view=True, data_zoom=True)
    # 工具箱配置
    toolbox_opts = opts.ToolboxOpts(orient="vertical",
                                    pos_bottom="15%",
                                    pos_left="90%",
                                    )

    line.set_global_opts(title_opts=title,
                         legend_opts=legend_opts,
                         toolbox_opts=toolbox_opts,
                         datazoom_opts = opts.DataZoomOpts(orient="vertical"),
                         )
    line.add_xaxis(arrt, )
    line.add_yaxis("點贊", value, is_smooth=True, linestyle_opts=opts.LineStyleOpts(color="#E83132", width="4"))
    line.add_yaxis("評論", value1, is_smooth=True, linestyle_opts=opts.LineStyleOpts(color="#00FFFF	", width="4"))
    line.add_yaxis("轉發", value2, is_smooth=True, linestyle_opts=opts.LineStyleOpts(color="#7CFC00", width="4"))
    line.render('{0}.html'.format(name))
def drawPictorialBar(location,values,name):
    c = (
        PictorialBar()
        .add_xaxis(location)
        .add_yaxis(
            "",
            values,
            label_opts=opts.LabelOpts(is_show=False),
            symbol_size=22,
            symbol_repeat="10000",
            symbol_offset=[0, -5],
            is_symbol_clip=True,
            # symbol='image://https://github.githubassets.com/images/spinners/octocat-spinner-32.gif'
            symbol='image://http://weizhendong.top/images/1.png'
        )
        .reversal_axis()
        .set_global_opts(
            title_opts=opts.TitleOpts(title=name),
            xaxis_opts=opts.AxisOpts(is_show=False),
            yaxis_opts=opts.AxisOpts(
                axistick_opts=opts.AxisTickOpts(is_show=False),
                axisline_opts=opts.AxisLineOpts(
                    linestyle_opts=opts.LineStyleOpts(opacity=0)
                ),
            ),
        )
        .render("{0}.html".format(name))
    )
def read_snowNLP(filename):
    snow_list = []
    a=0
    b=0
    c=0
    with open(filename, "r", encoding='utf-8') as f:
        for line in f.readlines():
            if line != "":
                s = SnowNLP(line)
                if s.sentiments > 0.5:
                    snow_list.append("{0}——褒義".format(line))
                    a+=1
                elif s.sentiments < 0.5:
                    snow_list.append("{0}——貶義".format(line))
                    b += 1
                else:
                    snow_list.append("{0}——中性".format(line))
                    c+=1
    return snow_list,a,b,c
if __name__ == '__main__':
    # 登錄
    # login()
    # 獲取信息
    # dic_info=get_info()
    # print(dic_info)
    ol = Operatingexcel()
    # 存儲到excel
    # ol.set_excel_dic(dic_info, "data\csdn_data.xlsx", 0, 0)
    dics = ol.get_excel_dic("data\csdn_data.xlsx", "大學排名")
    # print(dics)

    """繪製餅圖"""
    # yuanchuang = dict()
    # for f in dics["原創"]:
    #     if f == '1':
    #         yuanchuang["原創"] = yuanchuang.get("原創", 0) + 1
    #     elif f == '0':
    #         yuanchuang["非原創"] = yuanchuang.get("非原創", 0) + 1
    # attr = ['原創', '非原創']
    # value = [yuanchuang["原創"], yuanchuang["非原創"]]
    # draw.drawpie(attr,value,"data/原創和非原創餅圖")

    """繪製詞雲"""
    excludes = {'將軍', '卻說', '令人', '趕來', '徐州', '不見', '下馬', '喊聲', '因此', '未知', '大敗', '百姓', '大事', '一軍', '之後', '接應', '起兵',
                '成都', '原來', '江東', '正是', '忽然', '原來', '大叫', '上馬', '天子', '一面', '太守', '不如', '忽報', '後人', '背後', '先主', '此人',
                '城中', '然後', '大軍', '何不', '先生', '何故', '夫人', '不如', '先鋒', '二人', '不可', '如何', '荊州', '不能', '如此', '主公', '軍士',
                '商議', '引兵', '次日', '大喜', '魏兵', '軍馬', '於是', '東吳', '今日', '左右', '天下', '不敢', '陛下', '人馬', '不知', '都督', '漢中',
                '一人', '衆將', '後主', '只見', '蜀兵', '馬軍', '黃巾', '立功', '白髮', '大吉', '紅旗', '士卒', '錢糧', '於漢', '郎舅', '龍鳳', '古之',
                '白虎', '古人云', '爾乃', '馬飛報', '軒昂', '史官', '侍臣', '列陣', '玉璽', '車駕', '老夫', '伏兵', '都尉', '侍中', '西涼', '安民', '張曰',
                '文武',
                '白旗',
                '祖宗', '尋思','英雄','讚美','乳牙'}  # 排除的詞彙
    key = "玄德曰"
    value = "劉備"
    list_replace = []
    list_replace.append(tuple((key, value)))
    writetxt("內容.txt", dics["內容"])
    with open('內容.txt', 'r', encoding='utf-8') as f:
        text = f.read()
    dic_result = readjieba(text,excludes,list_replace)
    draw.drawWordCloud(dic_result["全部"], "data/微博內容詞雲")
    draw.drawWordCloud(dic_result["地名"], "data/微博地點詞雲")
    draw.drawWordCloud(dic_result["人名"], "data/微博人名詞雲")
    """繪製折線圖"""
    # arrt = [x for x in range(len(dics["評論"]))]
    # drawline(arrt, dics["點贊"], dics["評論"], dics["轉發"], "data/折線圖")

    """點贊象形圖"""
    # drawPictorialBar(arrt,dics["評論"], "data/點贊象形圖")

    """評論漣漪圖"""
    # draw.drawEffectScatter(arrt, dics["評論"],"data/評論漣漪圖")
    """轉發柱狀圖"""
    # draw.drawbar(arrt, dics["轉發"], "data/轉發柱狀圖")
    """微博情感極性"""
    snow_list,a,b,c = read_snowNLP("內容.txt")
    # 保存到txt文件中
    writetxt("data/微博情感極性.txt", snow_list)
    """繪製餅狀圖"""
    attr = ['積極', '消極',"中性"]
    value = [a, b,c]
    draw.drawpie(attr,value,"data/情感積極性餅圖")

Draw.py : https://blog.csdn.net/wei_zhen_dong/article/details/106300719
opdata.py :https://blog.csdn.net/wei_zhen_dong/article/details/105318970
(原文此處爲多張運行結果截圖,抓取爲純文本後無法顯示)

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章