NLP Basic Task 2: Text Classification with Deep Learning

This post is based on the task list from Professor Xipeng Qiu of the School of Computer Science, Fudan University: https://www.zhihu.com/question/324189960

Task: get familiar with PyTorch, rewrite Task 1 in PyTorch, and implement CNN- and RNN-based text classification.

  1. References

    1. https://pytorch.org/
    2. Convolutional Neural Networks for Sentence Classification https://arxiv.org/abs/1408.5882
    3. https://machinelearningmastery.com/sequence-classification-lstm-recurrent-neural-networks-python-keras/
  2. Initialization via word embeddings

  3. Random embedding initialization

  4. Initialization with pretrained GloVe embeddings: https://nlp.stanford.edu/projects/glove/ (a loading sketch follows this list)

  5. Key concepts:

    1. Feature extraction with CNNs/RNNs
    2. Word embeddings
    3. Dropout
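
For point 4 above, here is a minimal sketch of initializing a Keras Embedding layer with pretrained GloVe vectors. Assumptions: glove.6B.100d.txt has been downloaded from the GloVe page linked above, and `tokenizer` and `max_length` are the objects built in the code below; the file path and dimension are illustrative.

import numpy as np

EMBEDDING_DIM = 100  # must match the GloVe file used
embeddings_index = {}
with open('glove.6B.100d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        embeddings_index[values[0]] = np.asarray(values[1:], dtype='float32')

# Rows are the tokenizer's word indices; words missing from GloVe stay
# all-zero (random initialization is the usual alternative).
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, EMBEDDING_DIM))
for word, i in tokenizer.word_index.items():
    vector = embeddings_index.get(word)
    if vector is not None:
        embedding_matrix[i] = vector

# Pass the matrix as initial weights; trainable=False freezes it.
# model.add(Embedding(len(tokenizer.word_index) + 1, EMBEDDING_DIM,
#                     weights=[embedding_matrix], input_length=max_length,
#                     trainable=False))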

Code:

Note: the code below does not strictly follow the requirements; it is written with Keras rather than PyTorch (a rough PyTorch sketch is appended after the CNN model at the end).

import nltk
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pylab as plt
%matplotlib inline

import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"   # make only GPU 0 visible

# Load the data; the files are tab-separated, hence delimiter='\t'
df_train = pd.read_csv(r'sentiment-analysis-on-movie-reviews/train.tsv',delimiter='\t')
df_test = pd.read_csv(r'sentiment-analysis-on-movie-reviews/test.tsv',delimiter='\t')
df_train.head()

# Create the transformer. CountVectorizer is a common feature-extraction class
# that converts text into numeric token counts.
word_vectorizer = CountVectorizer(ngram_range = (1,1),analyzer = 'word',stop_words = 'english',min_df = 0.001)  # use the built-in English stop-word list
spare_matric = word_vectorizer.fit_transform(df_train['Phrase'])  # encode the corpus into a sparse document-term matrix

# print(spare_matric)
# sample output:
# (0, 480)	1
#   (0, 352)	1
#   (0, 222)	2
#   (0, 451)	1
#   (1, 222)	1
#   (1, 451)	1
#   (2, 451)	1
# print(sum(spare_matric))  
# column-wise sums; output like:
# (0, 570)	161
# (0, 28)	213

# Count how often each word appears in the corpus
# print(spare_matric.shape)  #(156060, 587)
frequency = sum(spare_matric).toarray()[0]  # toarray() gives a 2-D array like [[ 179  204  176 ... ]], hence the [0]
# print(len(frequency))  #587
# print(frequency)
freq = pd.DataFrame(frequency,index = word_vectorizer.get_feature_names(),columns = ['frequency'])
freq.sort_values('frequency',ascending = False)

# Inspect the label distribution: it looks roughly bell-shaped. If a target is badly
# skewed, a log-style transform can bring it closer to normal (see the sketch below).
a = df_train.Sentiment.value_counts()  # count occurrences of each distinct label
# a.plot(kind = 'bar')  # this kind of plot sorts bars by count by default
# print(a.index)
# print(a.values)
plt.bar(a.index,a.values)
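
A quick sketch of the log transform mentioned above (illustrative only; whether it helps depends on the actual skew of the target):

import numpy as np
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].bar(a.index, a.values)            # raw label counts
axes[0].set_title('raw counts')
axes[1].bar(a.index, np.log1p(a.values))  # log(1 + x) avoids log(0)
axes[1].set_title('log1p counts')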

# A prettier plot (assumes `import seaborn as sns`)
# a = pd.DataFrame(a)
# a['Rating'] = a.index
# sns.set_style("darkgrid", {"axes.facecolor": ".9"})
# fig, ax = plt.subplots(figsize=(10,6))
# sns.barplot(y='Sentiment', x='Rating', data=a)

# Preprocess the text: lowercase and strip non-alphanumeric characters
import re
df_train['Phrase'] = df_train['Phrase'].str.lower()
df_train['Phrase'] = df_train['Phrase'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
df_test['Phrase'] = df_test['Phrase'].str.lower()
# keep \s here too, otherwise every space in the test phrases gets deleted
df_test['Phrase'] = df_test['Phrase'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
# print(df_train['Phrase'])

X_train = df_train.Phrase
y_train = df_train.Sentiment

# Build the vocabulary and the training data
from keras.preprocessing.text import Tokenizer  # see https://blog.csdn.net/lovebyz/article/details/77712003
tokenizer = Tokenizer()
# print(X_train)
# output like:
# 0         a series of escapades demonstrating the adage ...
# 1         a series of escapades demonstrating the adage ...
# 2                                                  a series
# 3                                                         a
# 4                                                    series
tokenizer.fit_on_texts(X_train.values)  # build the token dictionary from a list of documents, one element per document

X_train = tokenizer.texts_to_sequences(X_train)  # convert each document into a sequence of word indices
# print(len(X_train))  #156060
# print(X_train[0])  #[2, 304, 3, 15110, 5906, 1, 6499, 9, 51, 8, 49, 13, 1, 3514, 8, 167, 49, 13, 1, 11381, 62, 3, 75, 615, 10453, 19, 576, 3, 75, 2003, 5, 54, 3, 2, 40]
# print(len(X_train[0]))  #35
# print(len(X_train[1]))  #14
# print(len(X_train[2]))  #2
X_test = df_test.Phrase
X_test = tokenizer.texts_to_sequences(X_test)

# Pad all sequences to a common length, usually the maximum length
from keras.preprocessing.sequence import pad_sequences
max_length = max([len(x.split()) for x in df_train['Phrase']])
# print(max_length)  #48
X_train = pad_sequences(X_train,max_length)
X_test = pad_sequences(X_test,max_length)
# print(X_train.shape)  #(156060, 48)
# print(X_test.shape)  #(66292, 48)

# Build the deep learning model (LSTM)
from keras import Sequential
from keras.layers import Embedding,LSTM,Dense

EMBEDDING_DIM = 128
dict_len = len(tokenizer.word_index) + 1
model = Sequential()
model.add(Embedding(dict_len,EMBEDDING_DIM,input_length = max_length))  # parameter docs: https://blog.csdn.net/jiangpeng59/article/details/77533309
model.add(LSTM(units = 128,dropout = 0.2,recurrent_dropout = 0.2))  # dropout acts on the input connections, recurrent_dropout on the hidden-to-hidden connections
model.add(Dense(5,activation = 'softmax'))
model.compile(loss = 'sparse_categorical_crossentropy',optimizer= 'adam',metrics= ['accuracy'])
# print(model.summary())
# Layer (type)                 Output Shape              Param #   
# =================================================================
# embedding_4 (Embedding)      (None, 48, 128)           2099712   
# _________________________________________________________________
# lstm_3 (LSTM)                (None, 128)               131584    
# _________________________________________________________________
# dense_3 (Dense)              (None, 5)                 645       
# =================================================================
# Total params: 2,231,941
# Trainable params: 2,231,941
# Non-trainable params: 0
# _________________________________________________________________
# None

model.fit(X_train,y_train,batch_size= 128,epochs= 7,verbose= 1)
# Epoch 6/7
# 156060/156060 [==============================] - 101s 650us/step - loss: 0.5748 - acc: 0.7544
# Epoch 7/7
# 156060/156060 [==============================] - 101s 644us/step - loss: 0.5448 - acc: 0.7645

# Predict on the test set and write the submission file
y_test_pred = model.predict_classes(X_test)
final_pred = pd.read_csv(r'sentiment-analysis-on-movie-reviews/sampleSubmission.csv', sep=',')
final_pred.Sentiment = y_test_pred  # fill the Sentiment column with the predicted classes
final_pred.to_csv(r'results.csv', sep=',', index=False)

# The CNN version
from keras.layers import Conv1D,Dropout,MaxPooling1D,Flatten
def build_model():
    model = Sequential()
    model.add(Embedding(dict_len,output_dim=32,input_length = max_length))  
    model.add(Conv1D(filters = 32,kernel_size = 3,padding='same',activation='relu'))  
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(5,activation = 'softmax'))
    model.compile(loss = 'sparse_categorical_crossentropy',optimizer= 'adam',metrics= ['accuracy'])
    model.fit(X_train,y_train,batch_size= 128,epochs= 7,verbose= 1)
    return model

model2 = build_model()
# Epoch 6/7
# 156060/156060 [==============================] - 7s 45us/step - loss: 0.6345 - acc: 0.7340
# Epoch 7/7
# 156060/156060 [==============================] - 7s 43us/step - loss: 0.6068 - acc: 0.7462
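
The assignment itself asks for PyTorch. As a rough sketch only (hyperparameters mirror the Keras LSTM above; the training loop is omitted), an equivalent PyTorch model might look like this:

import torch
import torch.nn as nn

class LSTMClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim=128, hidden_dim=128, num_classes=5):
        super().__init__()
        # padding_idx=0 matches the Keras tokenizer, whose word indices start at 1
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):                # x: (batch, seq_len) word indices
        emb = self.embedding(x)          # (batch, seq_len, embed_dim)
        _, (h_n, _) = self.lstm(emb)     # h_n: (1, batch, hidden_dim), final hidden state
        return self.fc(self.dropout(h_n[-1]))  # logits, (batch, num_classes)

Training would pair this with nn.CrossEntropyLoss() and torch.optim.Adam, feeding the padded index matrices above as torch.LongTensor batches.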

 
