Text Classification with a Convolutional Neural Network

Source code (GitHub):
https://github.com/dennybritz/cnn-text-classification-tf
My fork: https://github.com/tddfly/cnn-text-classification-tf
Reference blog post: https://blog.csdn.net/github_38414650/article/details/74019595

Dataset: https://github.com/cystanford/text_classification
It contains three folders: a training set (four categories), a test set, and a stop-word list.

Four files:
data_helpers.py: data preprocessing
train.py: the training procedure
text_cnn.py: the CNN architecture
eval.py: prediction and evaluation

data_helpers.py

import numpy as np
import re
import os
import jieba

# Data preprocessing
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) # 匹配所有大小寫字母、數字、括號、逗號、感嘆號、問號、引號等,不在這些字符之類的都用空格替代
    #string = re.sub(r"[A-Za-z0-9(),!?\'\`]", " ", string)  注意這個去掉^ 以後,就是將所有中括號內的替換成空格
    string = re.sub(r"\'s", " \'s", string) # 將“'s”用“ 's”代替,即family's變成family 's
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string) #將isn't變成 is n't
    string = re.sub(r"\'re", " \'re", string) #將you're變成you 're
    string = re.sub(r"\'d", " \'d", string) #將i'd變成 i 'd
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string) # s是空格,將2個或者2個以上的空格用一個空格代替

    return string.strip().lower()
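# For example (illustrative): clean_str("Isn't it good?") returns "is n't it good ?",
# and clean_str("My family's trip!") returns "my family 's trip !" -- contractions are
# split and punctuation is padded with spaces, so simple whitespace splitting yields clean tokens.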


def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
    positive_examples = [s.strip() for s in positive_examples]  # strip the trailing newline from each line
    negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples  # "+" merges the two lists into a new list (unlike extend, which adds to the original list in place)
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)  # np.concatenate joins arrays; its argument must be a tuple or list of arrays
    return [x_text, y]

#  Called from train.py as: batches = data_helpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
def batch_iter(data, batch_size, num_epochs, shuffle=True):  # yields one batch of data at a time
    """
    Generates a batch iterator for a dataset. `data` holds (feature, label) pairs.
    """
    data = np.array(data)  # [(text0, label0), (text1, label1), ...]
    # Note: data is a list whose elements all share the same structure, so it is stored as an
    # np.array to save memory. type(data) gives the type of data itself; data.dtype gives the
    # type of the array's elements.
    data_size = len(data)
    num_batches_per_epoch = int((len(data)-1)/batch_size) + 1  # number of batches needed to cover all data once per epoch
    for epoch in range(num_epochs):  # the data is reshuffled at every epoch
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))  # shuffled indices
            # Note on shuffle vs. permutation: both reshuffle an array, but np.random.shuffle operates
            # on the original array in place and returns nothing, while np.random.permutation returns
            # a new shuffled array and leaves the original unchanged.
            shuffled_data = data[shuffle_indices]  # shuffle the data
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):  # compute the start and end position of each batch
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
            # yield hands one batch to the calling for-loop per iteration, keeping memory usage constant
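# For example (illustrative): with 2976 training pairs and batch_size=64, the generator
# yields int((2976-1)/64)+1 = 47 batches per epoch; the last batch of each epoch holds the
# remaining 32 examples because end_index is capped at data_size.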

#============ separator ==================
# Return format: x_corpus is a list in which each element is one document as a string;
# y_label is a list of the corresponding labels in one-hot form, e.g. [[1,0,0,0], [0,1,0,0]] holds two labels.
def load_data_and_labels_3_27(corpus_path, stop_list_Path):
    x_corpus = []
    y_label = []
    stopwords = stopwordsList(stop_list_Path)  # build the stop-word list once, outside the loops
    cate_dir = os.listdir(corpus_path)  # list the category sub-directories
    # [女性 (women), 體育 (sports), 文學 (literature), 校園 (campus)]
    for cate in cate_dir:
        if cate == '女性':
            label = [1, 0, 0, 0]
        elif cate == '體育':
            label = [0, 1, 0, 0]
        elif cate == '文學':
            label = [0, 0, 1, 0]
        else:
            label = [0, 0, 0, 1]

        cate_complete_dir = corpus_path+'\\'+cate+"\\"  # full path of this category directory
        file_dir = os.listdir(cate_complete_dir)  # files under this category
        for file in file_dir:
            file_complete_dir = cate_complete_dir+file  # full path of one file in this category
            content = readfile(file_complete_dir)  # returns the whole document as a string
            # Clean the document: remove newlines and surrounding whitespace
            content = content.replace("\n", '').strip()
            content_seg = jieba.cut(content)  # word segmentation
            outstr = ''
            for word in content_seg:
                if word not in stopwords:
                    if word != '\t':
                        outstr += word
                        outstr += " "
            text = outstr.strip()  # outstr already separates the words with single spaces
            x_corpus.append(text)
            y_label.append(label)
    return [x_corpus, y_label]

def stopwordsList(stop_list_Path):
    with open(stop_list_Path, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f.readlines()]
    return stopwords

def readfile(filepath):
    f = open(filepath,'r',encoding='gb2312',errors='ignore')
    content = f.read()
    # read() returns the whole file as one string; readline() returns a single line as a string; readlines() reads all lines into a list
    f.close()
    return content
    # the whole document is returned here so that it can be segmented later
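
To tie the helpers together, here is a minimal usage sketch (illustrative only; the directory and file paths are placeholders, and in the real project these calls are made from train.py using the FLAGS values):

# Minimal sketch of data_helpers usage (assumed paths, for illustration only)
import data_helpers

x_corpus, y_label = data_helpers.load_data_and_labels_3_27(
    r'data\text_classification_data\train',              # assumed training-set directory
    r'data\text_classification_data\stop\stopword.txt')  # assumed stop-word file
print(len(x_corpus), len(y_label))   # number of documents and number of labels
batches = data_helpers.batch_iter(list(zip(x_corpus, y_label)), batch_size=64, num_epochs=1)
first_batch = next(batches)          # one batch of (text, label) pairs
print(first_batch.shape)             # (64, 2) when at least 64 documents are available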

text_cnn.py

import tensorflow as tf
import numpy as np

# Network architecture:
# an embedding layer + a convolutional layer (ReLU) + a max-pooling layer + a softmax layer
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
      self, sequence_length, num_classes, vocab_size,
      embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # sequence_length: maximum sentence length; shorter sentences are padded to this length
        # num_classes: number of output classes
        # vocab_size: size of the vocabulary
        # embedding_size: dimensionality of the word vectors
        # filter_sizes: filter heights, written 3, 4, 5 for short; each filter is actually filter_size * embedding_size
        # num_filters: number of filters per filter size
        # l2_reg_lambda=0.0: L2 regularization strength

        # Placeholders for input, output and dropout
        # Sentence matrix: the number of rows is the number of samples (left unspecified), the width is sequence_length (fixed sentence length)
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")  # input of shape batch_size * sequence_length; one document is one sample
        # Corresponding labels: number of rows unspecified, width num_classes
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # self.W can be read as the embedding table: vocab_size word vectors of size embedding_size, randomly initialized between -1 and 1
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name="W")  # randomly initialized embedding matrix
            # input_x is a sequence of word ids
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)  # looks up rows of a tensor by index
            # first argument: the embedding tensor; second argument: the indices
            # embedded_chars is the word-vector representation of input_x, of shape [batch_size, sequence_length, embedding_size]
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
            # tf.expand_dims(input, dim, name=None) adds a dimension; conv2d expects 4-D input, so one extra
            # dimension is appended, giving shape [batch_size, sequence_length, embedding_size, 1], ready for convolution

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []  # one 4-D tensor per filter size, so the list as a whole can be viewed as 5-D
        for i, filter_size in enumerate(filter_sizes):  # iterate over the index and the corresponding filter size
            with tf.name_scope("conv-maxpool-%s" % filter_size):  # one name scope per filter size
                # Input:        batch_size (number of sentences) * sequence_length (fixed sentence length) * embedding_size (width) * 1 (input channels)
                # Filter shape: filter_size (filter height), embedding_size (filter width = word-vector dimension), 1 (input channels), num_filters (output channels)
                # Conv output:  batch_size * (sequence_length - filter_size + 1) * 1 * num_filters
                # Pool size:                 (sequence_length - filter_size + 1) * 1
                # Pool output:  batch_size * 1 * 1 * num_filters
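                # For example (illustrative numbers): with sequence_length = 56, embedding_size = 128,
                # filter_size = 3, num_filters = 128 and a batch of 64 sentences, the convolution output
                # has shape [64, 54, 1, 128] and the max-pool output has shape [64, 1, 1, 128].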

                # Convolution Layer
                # num_filters (output channels) filters of size filter_size * embedding_size, with a single input channel
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                # filter height, filter width, input channels, output channels (i.e. the number of filters)
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,  # input, filter weights, strides, padding mode (no zero padding)
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",  # VALID gives a narrow convolution; SAME would keep the output the same length
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                # pooled has shape batch_size * 1 * 1 * num_filters; the last axis holds one pooled value per filter.
                # For example, with batch_size = 3 and num_filters = 2:
                # pooled = [ [[[1, 2]]], [[[3, 4]]], [[[5, 6]]] ], where each innermost pair is the two pooled feature values of one sample.

                pooled_outputs.append(pooled)  # for each filter size, every sample contributes num_filters pooled features
                # pooled_outputs can be viewed as a 5-D structure of shape
                # [len(filter_sizes), batch_size, 1, 1, num_filters]; the tensors are concatenated along the last (num_filters) axis below

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # concatenate the corresponding features of the different filter sizes along the last axis, e.g. [[3]] and [[4]] become [[3, 4]]

        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # flatten into shape [batch_size, num_filters_total] (total number of filters across all sizes)

        # Add dropout
        with tf.name_scope("dropout"): # dropout層,對池化後的結果做dropout
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)


        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores") # matmul(x, weights) + biases.
            self.predictions = tf.argmax(self.scores, 1, name="predictions")  # index of the highest score, i.e. the predicted class

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            #losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.scores,labels=tf.argmax(self.input_y))
            # cross-entropy
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
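
For reference, a minimal sketch of how this class is constructed (the concrete numbers are illustrative; in the project they come from preprocess() and the FLAGS defined in train.py):

# Illustrative instantiation of TextCNN (values assumed for the example)
cnn = TextCNN(
    sequence_length=56,      # longest document length after padding (assumed)
    num_classes=4,           # 女性 / 體育 / 文學 / 校園
    vocab_size=3658,         # vocabulary size reported during preprocessing on this dataset
    embedding_size=128,      # FLAGS.embedding_dim
    filter_sizes=[3, 4, 5],  # FLAGS.filter_sizes
    num_filters=128,         # FLAGS.num_filters
    l2_reg_lambda=0.0)       # FLAGS.l2_reg_lambda
# cnn.input_x expects word-id matrices of shape [batch_size, 56];
# cnn.scores then has shape [batch_size, 4] and cnn.predictions has shape [batch_size].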

train.py

#! /usr/bin/env python
# Network training
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn

# Command-line parameters
# ==================================================
# Vocabulary Size: 3658
# Train/Dev split: 2976/330

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", 0.1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

tf.flags.DEFINE_string("corpus_train_path",r'I:\代碼\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\train',"訓練集路徑")
tf.flags.DEFINE_string("stop_list_Path",r'I:\代碼\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\stop\stopword.txt','停用詞路徑')



# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
# evaluate on the dev set every 100 training steps (batches)
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
# save a checkpoint (the model parameters) every 100 steps
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
#FLAGS.flag_values_dict()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# Data preprocessing: build the vocabulary, turn each document into a sequence of word ids, shuffle the data, and split it into training and validation sets (cross-validation would also be possible)
def preprocess():
    # Data Preparation
    # ==================================================

    # Load data
    print("Loading data...")
    x_text, y = data_helpers.load_data_and_labels_3_27(FLAGS.corpus_train_path,FLAGS.stop_list_Path)
    # x_text is the corpus: a list whose elements are the preprocessed documents as strings
    # y holds the corresponding labels: a list whose elements are one-hot vectors

    # Build vocabulary. Word ids start at 1; id 0 is reserved for unseen words and for padding
    max_document_length = max([len(x.split(" ")) for x in x_text])  # length of the longest document, in words
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
    # build the vocabulary: every word in the corpus is assigned an id
    x = np.array(list(vocab_processor.fit_transform(x_text)))
    # each document becomes a padded sequence of word ids; x is a numpy.ndarray such as [[1 2 3 0] [4 5 0 0]]

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = np.array(y)[shuffle_indices]

    # Split train/test set
    # TODO: This is very crude, should use cross-validation
    # This is only a simple split; for a small dataset, cross-validation would be preferable
    dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
    x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]

    del x, y, x_shuffled, y_shuffled

    # Report the vocabulary size and the train/dev split
    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_train, y_train, vocab_processor, x_dev, y_dev
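# Worked instance of the split above, using the numbers this dataset reports:
# 2976 + 330 = 3306 documents in total, so dev_sample_index = -1 * int(0.1 * 3306) = -330,
# giving "Train/Dev split: 2976/330"; the vocabulary holds 3658 distinct words.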

def train(x_train, y_train, vocab_processor, x_dev, y_dev):
    # Training
    # ==================================================

    with tf.Graph().as_default():
        # tf.ConfigProto configures the session when it is created
        session_conf = tf.ConfigProto(
          allow_soft_placement=FLAGS.allow_soft_placement,  # fall back to an available device automatically if the requested one cannot be used
          log_device_placement=FLAGS.log_device_placement)  # log which device each op is placed on
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = TextCNN(
                sequence_length=x_train.shape[1],
                num_classes=y_train.shape[1],
                vocab_size=len(vocab_processor.vocabulary_),
                embedding_size=FLAGS.embedding_dim,
                filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
                num_filters=FLAGS.num_filters,
                l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            # initialized to 0 and incremented automatically at every update step
            optimizer = tf.train.AdamOptimizer(1e-3)
            # compute the gradients; returns a list of (gradient, variable) tuples
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            # update the variables using the gradients
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
            # Note: passing global_step=global_step increments the step counter by 1 at every parameter update.
            # train_op = optimizer.minimize(cnn.loss, global_step=global_step)  # the two steps above are equivalent to this single call; keeping them separate makes it easy to constrain the gradients
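            # For example (illustrative only, not part of the original script), gradient clipping
            # could be inserted between the two steps above:
            #   clipped = [(tf.clip_by_norm(g, 5.0), v) for g, v in grads_and_vars if g is not None]
            #   train_op = optimizer.apply_gradients(clipped, global_step=global_step)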

            # Keep track of gradient values and sparsity (optional), for visualization:
            # tf.summary.histogram() records how a tensor's values are distributed during training and shows them as a histogram on the TensorBoard histogram dashboard
            grad_summaries = []
            for g, v in grads_and_vars:  # (gradient, variable) pairs: g is the gradient, v the variable
                if g is not None:
                    grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)

            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
            print("Writing to {}\n".format(out_dir))

            # The tf.summary ops record the training process and parameter distributions so they can be displayed in TensorBoard.

            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar("loss", cnn.loss)
            acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

            # Train Summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

            # Write vocabulary
            vocab_processor.save(os.path.join(out_dir, "vocab"))

            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            # Defines a function whose input is a single batch
            def train_step(x_batch, y_batch):
                """
                A single training step
                """
                feed_dict = {
                  cnn.input_x: x_batch,
                  cnn.input_y: y_batch,
                  cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
                }
                _, step, summaries, loss, accuracy = sess.run(
                    [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                # update the model (gradient step), advance the step counter, write the summaries, and compute the loss and accuracy on this batch
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                # Note: step counts parameter updates (one update per batch), not epochs.
                # The total number of steps is (batches per epoch) * (number of epochs):
                # ceil(2976 / 64) = 47 batches per epoch, and 47 * 200 = 9400 steps in total.
                train_summary_writer.add_summary(summaries, step)

            # Defines a function for evaluating on the dev set; here the whole dev set is fed in at once
            def dev_step(x_batch, y_batch, writer=None):
                """
                Evaluates model on a dev set
                """
                feed_dict = {
                  cnn.input_x: x_batch,
                  cnn.input_y: y_batch,
                  cnn.dropout_keep_prob: 1.0
                }
                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                if writer:
                    writer.add_summary(summaries, step)


            # Generate batches: batch_iter returns a generator that yields one batch at a time instead of building a list [batch1, batch2, batch3, ...]

            batches = data_helpers.batch_iter(
                list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
            # the number of training epochs is controlled here
            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    dev_step(x_dev, y_dev, writer=dev_summary_writer)
                    print("")
                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))

def main(argv=None):
    x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()
    train(x_train, y_train, vocab_processor, x_dev, y_dev)

if __name__ == '__main__':
    tf.app.run()
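
Because tf.app.run() parses the flags defined at the top of this file from the command line, every default above can be overridden at launch time, for example (the paths are placeholders for your own data):

python train.py --num_epochs=50 --batch_size=32 --corpus_train_path=<your training directory> --stop_list_Path=<your stop-word file>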

eval.py

#! /usr/bin/env python
# Prediction and evaluation
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import csv

# Parameters
# ==================================================

# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

tf.flags.DEFINE_string("Test_corpus",r"I:\代碼\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\test","測試語料集")
tf.flags.DEFINE_string("stop_list_Path",r'I:\代碼\tensorflow\文本分類\分類數據\text_classification\stop\stopword.txt','停用詞路徑')

# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", r"I:\代碼\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\runs\1553692717\checkpoints", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", True, "Evaluate on all training data")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")


FLAGS = tf.flags.FLAGS
#FLAGS._parse_flags()
# FLAGS.flag_values_dict()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# CHANGE THIS: Load data. Load your own data here
# When eval_train is True, the if-branch loads our own test set
if FLAGS.eval_train:
    #x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
    x_raw, y_test = data_helpers.load_data_and_labels_3_27(FLAGS.Test_corpus,FLAGS.stop_list_Path)
    y_test = np.argmax(y_test, axis=1)  # labels become 0, 1, 2, 3, corresponding to 女性 (women), 體育 (sports), 文學 (literature), 校園 (campus)
else:
    x_raw = ["a masterpiece four years in the making", "everything is off."]
    y_test = [1, 0]
    #y_test = [2, 0]

# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")  # ".." refers to the parent directory
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        # input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Generate batches for one epoch
        batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []

        for x_test_batch in batches:
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])

# Print accuracy if y_test is defined
if y_test is not None:
    correct_predictions = float(sum(all_predictions == y_test))
    print("Total number of test examples: {}".format(len(y_test)))
    print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))

# Save the evaluation to a csv
# Columns: the segmented text to be classified, the predicted class, the true class, and bo ("True" if the prediction is correct, "False" otherwise)
bo = []
for i in range(len(y_test)):
    if all_predictions[i] == y_test[i]:
        bo.append("True")
        #bo.append(" ")
    else:
        bo.append("False")
#predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions,y_test,bo))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w', encoding='utf-8', newline='') as f:  # newline='' avoids blank rows in the CSV on Windows
    csv.writer(f).writerows(predictions_human_readable)
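
Accuracy alone can hide per-class differences. A possible follow-up (illustrative sketch, not part of the original script, assuming FLAGS.eval_train is True so y_test is a numpy array) reports per-class accuracy using only the arrays already computed above:

# Per-class accuracy from the arrays computed above (illustrative sketch)
for c in range(4):                      # classes 0..3: 女性, 體育, 文學, 校園
    mask = (y_test == c)                # samples whose true class is c
    if mask.sum() > 0:
        acc_c = float((all_predictions[mask] == c).mean())
        print("class {}: {} samples, accuracy {:g}".format(c, int(mask.sum()), acc_c))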
