An Investigation of Text Classification

I recently got hold of a batch of user feedback about our product and needed to classify the text, so as to identify users' pain points and uncover directions for improving the product.

This post looks into:

  1. the LDA topic model (Python gensim package);
  2. Naive Bayes;
  3. SVM.

I started with the LDA topic model, using the Python gensim package. The code is as follows:

import os
import jieba
from gensim import corpora, models

train_set = []

walk = os.walk('./data')  # feedback lives under ./data, one piece of feedback per file

jieba.load_userdict("userdict.txt")  # custom dictionary so jieba keeps product-specific terms intact

stopwords = [line.strip().decode('utf-8') for line in open('./stopwords.txt')]
stopwords.append(' ')


for root, dirs, files in walk:
    for name in files:
        f = open(os.path.join(root, name), 'r')
        raw = f.read()
        f.close()
        # segment with jieba and drop stopwords
        word_list = [w for w in jieba.cut(raw, cut_all=False) if w not in stopwords]
        train_set.append(word_list)

num = 20  # number of topics to extract

dic = corpora.Dictionary(train_set)
corpus = [dic.doc2bow(text) for text in train_set]  # bag-of-words vectors
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]  # TF-IDF weighting
lda = models.LdaModel(corpus_tfidf, id2word=dic, num_topics=num)
corpus_lda = lda[corpus_tfidf]  # per-document topic mixtures (not used below)

for i in range(0, num):
    print lda.print_topic(i)  # top words of each topic

Result: the extracted topics were hard to interpret, and the overall outcome was not great.
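
To see where interpretability breaks down, it also helps to look at the topic mixture the model assigns to a single piece of feedback. A minimal sketch reusing the lda, tfidf, dic, and stopwords objects from the script above (the sample text is a hypothetical placeholder):

text = u'...'  # any raw feedback string (placeholder)
tokens = [w for w in jieba.cut(text, cut_all=False) if w not in stopwords]
bow = dic.doc2bow(tokens)
print lda[tfidf[bow]]  # list of (topic_id, weight) pairs for this document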


Next I turned to supervised learning with Naive Bayes and SVM. The implementation (using Python sklearn) is as follows:

import jieba
import os
import sys
import sklearn.feature_extraction
import sklearn.naive_bayes as nb
import sklearn.externals.joblib as jl
from sklearn import metrics
from sklearn.svm import SVC
import xlrd

# Python 2 hack so unicode text prints without encoding errors
reload(sys)
sys.setdefaultencoding('utf8')

def calculate_result(actual, pred):
    m_precision = metrics.precision_score(actual, pred)
    m_recall = metrics.recall_score(actual, pred)
    print 'predict info:'
    print 'precision:{0:.3f}'.format(m_precision)
    print 'recall:{0:0.3f}'.format(m_recall)
    print 'f1-score:{0:.3f}'.format(metrics.f1_score(actual, pred))


jieba.load_userdict("userdict.txt")

stopwords = [line.strip().decode('utf-8') for line in open('./stopwords.txt')]
stopwords.append(' ')

kvlist = []      # tokenized training texts
targetlist = []  # training labels

gnb = nb.MultinomialNB(alpha=0.01)
# hash each token list into a fixed-size sparse vector; non_negative=True is
# needed because MultinomialNB cannot handle negative feature values
fh = sklearn.feature_extraction.FeatureHasher(n_features=15000, non_negative=True, input_type='string')

# get train data: labeled training data, one sample per row in the Excel sheet;
# the first column holds the text to classify, the second the human-assigned label
data = xlrd.open_workbook('./data/train.csv.xls')
table = data.sheets()[0]
nrows = table.nrows

for r in range(1, nrows):  # skip the header row
    line = table.row_values(r)
    if line[1] != '':  # keep only rows that actually carry a label
        targetlist.append(int(line[1]))
        wordlist = [w for w in jieba.cut(line[0], cut_all=False) if w not in stopwords]
        kvlist.append(wordlist)

# get test data: labeled test data, same format as the training data
testlist = []
testtargetlist = []
data = xlrd.open_workbook('./data/test.xls')
table = data.sheets()[0]
nrows = table.nrows

for r in range(1, nrows):
    line = table.row_values(r)
    if line[1] != '':
        testtargetlist.append(int(line[1]))
        wordlist = [w for w in jieba.cut(line[0], cut_all=False) if w not in stopwords]
        testlist.append(wordlist)

# evaluate the models' precision and recall
print '*************************\nNB\n*************************'
X = fh.fit_transform(kvlist)
# FeatureHasher is stateless, so transform() is all the test data needs
testX = fh.transform(testlist)

gnb.fit(X, targetlist)
result = gnb.predict(testX)

calculate_result(testtargetlist, result)

print '*************************\nSVM\n*************************'
svclf = SVC(kernel='linear')  # the default kernel is 'rbf'
svclf.fit(X, targetlist)
pred = svclf.predict(testX)
calculate_result(testtargetlist, pred)

# classify the raw data with the trained SVM model; each piece of raw
# feedback sits in its own text file
walk = os.walk('./data/2016')
for root, dirs, files in walk:
    for name in files:
        f = open(os.path.join(root, name), 'r')
        raw = f.read()
        f.close()
        word_list = [w for w in jieba.cut(raw, cut_all=False) if w not in stopwords]
        X = fh.transform([word_list])
        pred = svclf.predict(X)
        print raw
        print pred
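
The script imports sklearn.externals.joblib as jl but never calls it; presumably it was intended for model persistence, so the SVM does not have to be retrained on every run. A minimal sketch reusing the svclf object above (the file name ./svm.model is an arbitrary choice):

jl.dump(svclf, './svm.model')   # persist the trained classifier to disk
svclf = jl.load('./svm.model')  # later: load it back without retraining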

Result: the SVM's precision and recall were close to 70%, slightly better than Naive Bayes.
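
The two aggregate numbers hide how individual categories perform. sklearn's classification_report prints per-class precision, recall, and F1; a minimal sketch reusing metrics, testtargetlist, and pred from the script above:

print metrics.classification_report(testtargetlist, pred)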

