TF-IDF (term frequency–inverse document frequency) is a weighting technique widely used in information retrieval and data mining. It is simply the product of two statistics: TF-IDF = TF * IDF.
TF (term frequency) is the frequency with which a term appears in document d.
The core idea of IDF (inverse document frequency) is: the fewer documents contain term t (i.e., the smaller n is), the larger the IDF, which means term t discriminates well between categories.
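Concretely, with N the total number of documents and n the number of documents that contain term t, the classic formulation (the log base and smoothing vary by implementation) is:

    TF-IDF(t, d) = TF(t, d) * IDF(t),  where IDF(t) = log(N / n)

Note that scikit-learn's TfidfTransformer, used in the code below, defaults to a smoothed variant, IDF(t) = ln((1 + N) / (1 + n)) + 1, and then L2-normalizes each document vector, so its weights differ slightly from the textbook values.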
Notes:
userdict.txt: my custom dictionary of the domain-specific terms jieba should recognize (a format example follows these notes)
stop_words.txt: my stop-word list
df_data: the text of the 100 articles in the database (df_data is a document collection: each article occupies one line, so 100 articles take 100 lines). By default, each line is read as one article.
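For reference, jieba.load_userdict expects one entry per line in the form "word frequency POS-tag", where the frequency and the POS tag are both optional; stop_words.txt is simply one stop word per line. The userdict.txt entries below are made-up examples, not taken from the actual dictionary:

    行政訴訟 10 n
    不予受理 5 l
    駁回起訴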
The complete code can be copied and used directly:
#! /usr/bin/python
# -*- coding: utf8 -*-
# @Time : 2019/3/4 15:40
# @Author : yukang
# Source: https://blog.csdn.net/a2099948768/article/details/89189587
import sys, codecs
import pandas as pd
import numpy as np
import jieba.posseg
import jieba.analyse
from pyquery import PyQuery
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from MysqlDBPool import *
from Kmeans_word import *
"""
TF-IDF權重
1,CountVectorizer 構建詞頻舉證
2,TfidfTransformer 構建tfidf權重計算
3,文本的關鍵詞
4,對應的tfidf矩陣
"""
jieba.load_userdict('./userdict.txt')  # load the external user dictionary

# Data preprocessing: segmentation, stop-word removal, POS filtering
def dataPrepos(text, stopkey):
    l = []
    pos = ['n', 'nz', 'v', 'vd', 'vn', 'l', 'a', 'd']  # POS tags to keep; annotated at https://blog.csdn.net/a2099948768/article/details/82216906
    seg = jieba.posseg.cut(PyQuery(text).text())  # strip any HTML with PyQuery, then segment with POS tagging
    for i in seg:
        if i.word not in stopkey and i.flag in pos:  # keep words that pass the stop-word and POS filters
            l.append(i.word)
    return l
# Extract the top-K keywords of each text with tf-idf
def getKeywords_tfidf(data, stopkey, topK):
    idList, titleList, abstractList = data['id'], data['title'], data['abstract']
    corpus = []  # gather all documents into one list; each element is one document
    for index in range(len(idList)):
        text = '%s。%s' % (titleList[index], abstractList[index])  # join the title and the abstract
        text = dataPrepos(text, stopkey)  # text preprocessing
        text = " ".join(text)
        corpus.append(text)
    # 1. Build the term-frequency matrix: X[i][j] is the frequency of word j in document i
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # 2. Compute the tf-idf weight of every word
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(X)
    # 3. Get the vocabulary of the bag-of-words model
    word = vectorizer.get_feature_names_out()  # on scikit-learn < 1.0, use get_feature_names()
    # 4. Get the tf-idf matrix: weight[i][j] is the tf-idf weight of word j in document i
    weight = tfidf.toarray()
    # 5. Collect the word weights per document
    ids, titles, keys = [], [], []
    for i in range(len(weight)):
        # print("---------- tf-idf weights of document", i + 1, "----------")
        ids.append(idList[i])
        titles.append(titleList[i])
        df_word, df_weight = [], []  # all words of the current document, and their weights
        for j in range(len(word)):
            # print(word[j], weight[i][j])
            df_word.append(word[j])
            df_weight.append(weight[i][j])
        df_word = pd.DataFrame(df_word, columns=['word'])
        df_weight = pd.DataFrame(df_weight, columns=['weight'])
        word_weight = pd.concat([df_word, df_weight], axis=1)  # join the word list and the weight list
        getkeywords_kmeans(word_weight, 100)  # cluster the keywords
        word_weight = word_weight.sort_values(by='weight', ascending=False)  # sort by weight, descending
        keyword = np.array(word_weight['word'])  # take the word column as an array
        word_split = [keyword[x] for x in range(min(topK, len(keyword)))]  # guard against fewer than topK words
        word_split = " ".join(word_split)
        keys.append(word_split)  # on Python 3, no .encode("utf-8"): bytes would pollute the result DataFrame
    result = pd.DataFrame({"id": ids, "title": titles, "key": keys}, columns=["id", "title", "key"])
    return result
def main():
    topK = 150
    select_data = "SELECT ID,TITLE,FACT,RESULT,BASIS FROM `wenshu_detail` WHERE ID>0 AND ID<101"
    all_data = mysql_pool_client.exec_query(select_data)
    df_data = pd.DataFrame([[i[0], i[1], i[2] + i[3] + i[4]] for i in all_data], columns=['id', 'title', 'abstract'])
    # # Or read the dataset from a CSV file instead
    # dataFile = "./text.csv"
    # data = pd.read_csv(dataFile)
    # Stop words
    stopkey = [w.strip() for w in codecs.open("./stop_words.txt", "r", encoding="utf-8").readlines()]
    # tf-idf keyword extraction
    result = getKeywords_tfidf(df_data, stopkey, topK)
    print(result)

if __name__ == '__main__':
    main()
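If you want to try the tf-idf step without the MySQL pool or the Kmeans_word module, here is a minimal self-contained sketch of the same CountVectorizer + TfidfTransformer pipeline; the three pre-segmented toy documents are made up for illustration:

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# Minimal tf-idf demo on a toy corpus; documents are assumed to be
# already segmented into space-separated tokens, as dataPrepos produces above.
corpus = [
    "法院 判決 賠償 原告",
    "被告 上訴 法院 駁回",
    "合同 糾紛 賠償 調解",
]
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus)           # term-frequency matrix
tfidf = TfidfTransformer().fit_transform(X)    # tf-idf weights
words = vectorizer.get_feature_names_out()     # vocabulary
weight = tfidf.toarray()
for i in range(len(weight)):
    top = np.argsort(weight[i])[::-1][:2]      # indices of the 2 largest weights
    print(i, [(words[j], round(weight[i][j], 3)) for j in top])

Words unique to one document (for example "原告") get the highest weights, while "法院" and "賠償", which each appear in two of the three documents, are down-weighted by the IDF term.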