NLP Practice - Task 1: Exploring the THUCNews and IMDB Datasets

Full code: https://github.com/gaussic/text-classification-cnn-rnn
The data-processing script below covers word segmentation, stopword removal, and feature extraction for the dataset.

import jieba
import pandas as pd
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import CountVectorizer


# Read the stopword list, one word per line
def read_stopword(filename):
    with open(filename, 'r', encoding='utf-8') as fp:
        return [line.strip() for line in fp]


# Segment each document with jieba and drop stopwords
def cut_data(data, stopword):
    stopword = set(stopword)  # set membership tests are O(1)
    words = []
    for content in data['content']:
        # keep every token that is not a stopword
        words.append([w for w in jieba.cut(content) if w not in stopword])
    data['content'] = words
    return data
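As a quick sanity check, here is a small, hypothetical usage of cut_data on a two-row DataFrame (the sentences and stopwords are invented for illustration, and the exact segmentation depends on jieba's dictionary):

import pandas as pd

sample = pd.DataFrame({'content': ['我愛自然語言處理', '今天天氣很好']})
# relies on cut_data as defined above
sample = cut_data(sample, ['我', '很', '今天'])
print(sample['content'].tolist())
# roughly: [['愛', '自然語言', '處理'], ['天氣', '好']]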


# Flatten the tokenized documents into a single list of words
def word_list(data):
    all_word = []
    for word in data['content']:
        all_word.extend(word)
    return all_word


# Extract features: train Word2Vec on all splits, then map each document
# to the matrix of its per-word vectors. Note that gensim 4.x renamed
# size -> vector_size and iter -> epochs, and indexing moved to model.wv.
def feature(train_data, test_data, val_data):
    content = pd.concat([train_data['content'], test_data['content'], val_data['content']], ignore_index=True)
    # Alternative: bag-of-words counts with scikit-learn
    # count_vec = CountVectorizer(max_features=300, min_df=2)
    # count_vec.fit_transform(content)
    # train_fea = count_vec.transform(train_data['content']).toarray()
    # test_fea = count_vec.transform(test_data['content']).toarray()
    # val_fea = count_vec.transform(val_data['content']).toarray()
    model = Word2Vec(content, vector_size=100, min_count=1, window=10, epochs=10)
    train_fea = train_data['content'].apply(lambda x: model.wv[x])
    test_fea = test_data['content'].apply(lambda x: model.wv[x])
    val_fea = val_data['content'].apply(lambda x: model.wv[x])
    return train_fea, test_fea, val_fea
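Each entry of these features is a (num_tokens, 100) array, so documents have varying lengths. A common next step, not part of the original script, is to mean-pool the word vectors into one fixed 100-dimensional vector per document; a minimal sketch:

import numpy as np

# Average per-word vectors so every document becomes one 100-dim vector;
# documents emptied by stopword removal fall back to a zero vector.
def mean_pool(fea_series, dim=100):
    pooled = [vecs.mean(axis=0) if len(vecs) else np.zeros(dim) for vecs in fea_series]
    return np.vstack(pooled)

# e.g. X_train = mean_pool(train_fea)  ->  shape (n_docs, 100)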


if __name__ == '__main__':
    train_data = pd.read_csv('./data/task1/cnews/cnews.train.txt', names=['title', 'content'], sep='\t')  # (50000, 2)
    test_data = pd.read_csv('./data/task1/cnews/cnews.test.txt', names=['title', 'content'], sep='\t')  # (10000, 2)
    val_data = pd.read_csv('./data/task1/cnews/cnews.val.txt', names=['title', 'content'], sep='\t')  # (5000, 2)

    # keep only a small sample so the script runs quickly
    train_data = train_data.head(50)
    test_data = test_data.head(50)
    val_data = val_data.head(50)

    stopword = read_stopword('./data/stopword.txt')
    train_data = cut_data(train_data, stopword)
    test_data = cut_data(test_data, stopword)
    val_data = cut_data(val_data, stopword)

    train_fea, test_fea, val_fea = feature(train_data, test_data, val_data)
    print(train_fea)

    # collect the deduplicated vocabulary across all three splits
    all_word = []
    all_word.extend(word_list(train_data))
    all_word.extend(word_list(test_data))
    all_word.extend(word_list(val_data))
    all_word = list(set(all_word))
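    If you need a frequency-ranked vocabulary rather than an unordered set (the linked repo builds its vocabulary with a similar idea), here is a minimal sketch using collections.Counter; the 5000-word cutoff is only illustrative:

    from collections import Counter

    # count word frequencies over every split and keep the most common words
    counter = Counter()
    for df in (train_data, test_data, val_data):
        for word in df['content']:
            counter.update(word)
    vocab = [w for w, _ in counter.most_common(5000)]  # illustrative cutoff
    word_to_id = {w: i for i, w in enumerate(vocab)}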

 
