1. Reading the dataset
```python
# 1. Read the dataset
def read_dataset():
    file_path = r'SMSSpamCollection'
    sms = open(file_path, encoding='utf-8')
    sms_data = []
    sms_label = []
    csv_reader = csv.reader(sms, delimiter='\t')
    for line in csv_reader:
        sms_label.append(line[0])                # extract the label
        sms_data.append(preprocessing(line[1]))  # extract the preprocessed text features
    sms.close()
    return sms_data, sms_label
```
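For reference, SMSSpamCollection is a plain-text file with one message per line, the label and the text separated by a tab (hence `delimiter='\t'` above). Abridged illustrative lines:

```text
ham	Ok lar... Joking wif u oni...
spam	Free entry in 2 a wkly comp to win FA Cup final tkts...
```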
2. Data preprocessing
```python
# 2. Preprocess the data
# (get_wordnet_pos, defined in the complete code below, maps Treebank tags to WordNet pos values)
def preprocessing(text):
    tokens = [word for sent in nltk.sent_tokenize(text)
              for word in nltk.word_tokenize(sent)]                  # tokenize
    tokens = [token.lower() for token in tokens if len(token) >= 3]  # lowercase first, drop short tokens
    stops = stopwords.words('english')                               # English stop-word list (lowercase)
    tokens = [token for token in tokens if token not in stops]       # remove stop words
    wnl = WordNetLemmatizer()
    tag = nltk.pos_tag(tokens)                                       # POS tagging
    tokens = [wnl.lemmatize(token, pos=get_wordnet_pos(tag[i][1]))
              for i, token in enumerate(tokens)]                     # lemmatize by POS
    preprocessed_text = ' '.join(tokens)
    return preprocessed_text
```
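The tokenizer, stop-word list, POS tagger, and lemmatizer above all rely on NLTK data packages that are not installed by default. A minimal one-time setup sketch (package names as of recent NLTK releases):

```python
import nltk

# One-time downloads for the resources used in preprocessing():
nltk.download('punkt')                       # sentence and word tokenizers
nltk.download('stopwords')                   # English stop-word list
nltk.download('averaged_perceptron_tagger')  # POS tagger behind nltk.pos_tag
nltk.download('wordnet')                     # lexical database for the lemmatizer
nltk.download('omw-1.4')                     # WordNet data needed by newer NLTK versions
```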
3. Splitting the data into training and test sets
```python
from sklearn.model_selection import train_test_split

# stratify must reference the full label array, not y_train (which doesn't exist yet)
x_train, x_test, y_train, y_test = train_test_split(
    data, target, test_size=0.2, random_state=0, stratify=target)
```
```python
# 3. Split the dataset
def split_dataset(data, label):
    x_train, x_test, y_train, y_test = train_test_split(
        data, label, test_size=0.2, random_state=0, stratify=label)
    return x_train, x_test, y_train, y_test
```
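The SMS data is heavily imbalanced (far more ham than spam), which is why `stratify=label` matters: it keeps the ham/spam ratio the same in both splits. A quick verification sketch, assuming `read_dataset()` has already run:

```python
from collections import Counter

x_train, x_test, y_train, y_test = split_dataset(sms_data, sms_label)

# With stratify=label, both splits should show approximately the same ham/spam ratio
print("train:", Counter(y_train))
print("test: ", Counter(y_test))
```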
4. Text feature extraction
Two relevant vectorizers: `sklearn.feature_extraction.text.CountVectorizer` and `sklearn.feature_extraction.text.TfidfVectorizer`.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf2 = TfidfVectorizer()
```
Two sub-tasks here: observe the relationship between a message and its vector, and recover the message from the vector.
```python
# 4. Text feature extraction: turn the texts into a tf-idf feature matrix
def tfidf_dataset(x_train, x_test):
    tfidf = TfidfVectorizer()
    X_train = tfidf.fit_transform(x_train)
    X_test = tfidf.transform(x_test)
    return X_train, X_test, tfidf

# Recover a message from its vector
def revert_mail(x_train, X_train, model):
    s = X_train.toarray()[0]
    print("Vector for the first message:", s)
    a = np.flatnonzero(X_train.toarray()[0])  # indices of the non-zero elements
    print("Positions of the non-zero elements:", a)
    print("Values of the non-zero elements:", s[a])
    b = model.vocabulary_                     # vocabulary: word -> column index
    key_list = []
    for key, value in b.items():
        if value in a:
            key_list.append(key)              # words whose columns are non-zero
    print("Words for the non-zero elements:", key_list)
    print("Message before vectorization:", x_train[0])
```
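As an aside, scikit-learn vectorizers also provide `inverse_transform`, which returns the terms with non-zero weight in each row directly, so the manual vocabulary lookup in `revert_mail` can be cross-checked with a one-liner (a sketch, assuming `X_train` and `tfidf` from `tfidf_dataset`):

```python
# inverse_transform maps a matrix row back to the terms with non-zero weight;
# it recovers the words (unordered, deduplicated), not the original word order.
terms = tfidf.inverse_transform(X_train[0])
print("Terms with non-zero weight in the first message:", terms[0])
```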
5. Model selection
```python
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
```

Why this model? The tf-idf matrix consists of sparse, non-negative weights derived from word counts, which matches the multinomial event model, so MultinomialNB is the natural choice. GaussianNB assumes continuous features following a normal distribution, which does not describe word-frequency data well.
```python
# 5. Model selection
def mnb_model(x_train, x_test, y_train, y_test):
    mnb = MultinomialNB()
    mnb.fit(x_train, y_train)
    pre = mnb.predict(x_test)
    print("Test set size:", len(y_test))
    print("Correct predictions:", (pre == y_test).sum())
    print("Accuracy:", sum(pre == y_test) / len(y_test))
    return pre
```
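For contrast, GaussianNB cannot consume the sparse tf-idf matrix directly; it needs a dense array, which is costly at this vocabulary size. A hypothetical sketch (not part of the original pipeline), assuming the matrices from step 4:

```python
from sklearn.naive_bayes import GaussianNB

# GaussianNB requires dense input; densifying a large tf-idf matrix is memory-hungry,
# which is another practical reason MultinomialNB is preferred here.
gnb = GaussianNB()
gnb.fit(X_train.toarray(), y_train)
print("GaussianNB accuracy:", gnb.score(X_test.toarray(), y_test))
```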
6. Model evaluation: confusion matrix and classification report
```python
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_predict)  # use a new name; don't shadow the confusion_matrix function
```

The confusion matrix tabulates true labels (rows) against predicted labels (columns); in the binary case its entries are the counts of true negatives, false positives, false negatives, and true positives.

```python
from sklearn.metrics import classification_report
```

Metric meanings: accuracy is the fraction of all predictions that are correct, (TP + TN) / total; precision, TP / (TP + FP), is the fraction of predicted positives that are truly positive; recall, TP / (TP + FN), is the fraction of actual positives that are found; the F1 score is the harmonic mean of precision and recall, 2PR / (P + R).
```python
# 6. Model evaluation
def class_report(pre, y_test):
    conf_matrix = confusion_matrix(y_test, pre)
    print("=====================================================")
    print("Confusion matrix:\n", conf_matrix)
    c = classification_report(y_test, pre)
    print("Classification report:\n", c)
    print("Model accuracy:", (conf_matrix[0][0] + conf_matrix[1][1]) / np.sum(conf_matrix))
```
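Note that `confusion_matrix` orders rows and columns by sorted label values, so here row 0 is 'ham' and row 1 is 'spam'. Passing `labels` explicitly makes the accuracy expression above easier to read; a small sketch, assuming `y_test` and `pre` from the step above:

```python
from sklearn.metrics import confusion_matrix

# Fix the row/column order explicitly: rows = true labels, columns = predictions
cm = confusion_matrix(y_test, pre, labels=['ham', 'spam'])
tn, fp, fn, tp = cm.ravel()  # true ham, ham->spam, spam->ham, true spam
print("accuracy:", (tn + tp) / cm.sum())
```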
Complete code:
```python
# -*- coding:utf-8 -*-
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import csv


def get_wordnet_pos(treebank_tag):  # map a Treebank POS tag to a WordNet pos argument
    if treebank_tag.startswith('J'):    # adjective
        return nltk.corpus.wordnet.ADJ
    elif treebank_tag.startswith('V'):  # verb
        return nltk.corpus.wordnet.VERB
    elif treebank_tag.startswith('N'):  # noun
        return nltk.corpus.wordnet.NOUN
    elif treebank_tag.startswith('R'):  # adverb
        return nltk.corpus.wordnet.ADV
    else:
        return nltk.corpus.wordnet.NOUN


# Preprocessing
def preprocessing(text):
    tokens = [word for sent in nltk.sent_tokenize(text)
              for word in nltk.word_tokenize(sent)]                  # tokenize
    tokens = [token.lower() for token in tokens if len(token) >= 3]  # lowercase first, drop short tokens
    stops = stopwords.words('english')                               # English stop-word list
    tokens = [token for token in tokens if token not in stops]       # remove stop words
    lmtzr = WordNetLemmatizer()
    tag = nltk.pos_tag(tokens)                                       # POS tagging
    tokens = [lmtzr.lemmatize(token, pos=get_wordnet_pos(tag[i][1]))
              for i, token in enumerate(tokens)]                     # lemmatize by POS
    preprocessed_text = ' '.join(tokens)
    return preprocessed_text


# Read the dataset
def read_dataset():
    file_path = r'SMSSpamCollection'
    sms = open(file_path, encoding='utf-8')  # open the data file
    sms_label = []  # labels
    sms_data = []   # message texts
    csv_reader = csv.reader(sms, delimiter='\t')
    for line in csv_reader:
        sms_label.append(line[0])                # extract the label
        sms_data.append(preprocessing(line[1]))  # preprocess each message
    sms.close()
    return sms_data, sms_label


# Split the dataset
def split_dataset(data, label):
    x_train, x_test, y_train, y_test = train_test_split(
        data, label, test_size=0.2, random_state=0, stratify=label)
    return x_train, x_test, y_train, y_test


# Turn the raw texts into a tf-idf feature matrix
def tfidf_dataset(x_train, x_test):
    tfidf = TfidfVectorizer()
    X_train = tfidf.fit_transform(x_train)  # fit_transform builds the vocabulary from X_train
    X_test = tfidf.transform(x_test)        # X_test must share X_train's vocabulary, so only transform
    return X_train, X_test, tfidf


# Recover a message from its vector
def revert_mail(x_train, X_train, model):
    s = X_train.toarray()[0]
    print("Vector for the first message:", s)
    # np.flatnonzero returns the indices of the non-zero elements of the flattened array
    a = np.flatnonzero(X_train.toarray()[0])
    print("Positions of the non-zero elements:", a)
    print("Values of the non-zero elements:", s[a])
    b = model.vocabulary_  # vocabulary: word -> column index
    key_list = []
    for key, value in b.items():
        if value in a:
            key_list.append(key)  # words whose columns are non-zero
    print("Words for the non-zero elements:", key_list)
    print("Message before vectorization:", x_train[0])


# Model selection (the multinomial distribution fits the count-based features)
def mnb_model(x_train, x_test, y_train, y_test):
    mnb = MultinomialNB()
    mnb.fit(x_train, y_train)
    ypre_mnb = mnb.predict(x_test)
    print("Test set size:", len(y_test))
    print("Correct predictions:", (ypre_mnb == y_test).sum())
    return ypre_mnb


# Model evaluation: confusion matrix and classification report
def class_report(ypre_mnb, y_test):
    conf_matrix = confusion_matrix(y_test, ypre_mnb)
    print("Confusion matrix:\n", conf_matrix)
    c = classification_report(y_test, ypre_mnb)
    print("------------------------------------------")
    print("Classification report:\n", c)
    print("Model accuracy:", (conf_matrix[0][0] + conf_matrix[1][1]) / np.sum(conf_matrix))


if __name__ == '__main__':
    sms_data, sms_label = read_dataset()                                   # read the dataset
    x_train, x_test, y_train, y_test = split_dataset(sms_data, sms_label)  # split the dataset
    X_train, X_test, tfidf = tfidf_dataset(x_train, x_test)                # build the tf-idf feature matrix
    revert_mail(x_train, X_train, tfidf)                                   # recover a message from its vector
    y_mnb = mnb_model(X_train, X_test, y_train, y_test)                    # train and predict
    class_report(y_mnb, y_test)                                            # evaluate the model
```
7. Comparison and summary
If CountVectorizer is used for text feature generation instead of TfidfVectorizer, how do the results compare?
- CountVectorizer: considers only how often each word occurs in a document; this is a plain bag-of-words feature.
- TfidfVectorizer: besides the frequency of a word within a document, it also accounts for how many documents contain that word, which down-weights frequent but uninformative words and surfaces more meaningful features; this is a tf-idf feature.
- In this experiment, CountVectorizer predicted the negative class more accurately than TfidfVectorizer but was slightly worse on the positive class. Its overall accuracy was also slightly higher, so CountVectorizer appears to be the better fit here (see the reproduction sketch below).
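Since CountVectorizer exposes the same `fit_transform`/`transform` interface, reproducing the comparison is a drop-in change to `tfidf_dataset`; a minimal sketch:

```python
from sklearn.feature_extraction.text import CountVectorizer

# Same pipeline as tfidf_dataset, but with raw term counts instead of tf-idf weights
def count_dataset(x_train, x_test):
    cv = CountVectorizer()
    X_train = cv.fit_transform(x_train)  # learn the vocabulary on the training texts
    X_test = cv.transform(x_test)        # reuse the same vocabulary for the test texts
    return X_train, X_test, cv
```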