Text Classification with a Convolutional Neural Network

Source code (GitHub):
https://github.com/dennybritz/cnn-text-classification-tf
My fork: https://github.com/tddfly/cnn-text-classification-tf
Reference blog post: https://blog.csdn.net/github_38414650/article/details/74019595

Dataset: https://github.com/cystanford/text_classification
It contains three folders: the training set (four categories), the test set, and a stop-word list.

Four files:
data_helpers.py: data preprocessing
train.py: the training procedure
text_cnn.py: the CNN network structure
eval.py: prediction and evaluation

The data_helpers.py file

import numpy as np
import re
import os
import jieba

# Data preprocessing
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) # keep letters, digits, parentheses, commas, !, ?, quotes and backticks; replace every other character with a space
    #string = re.sub(r"[A-Za-z0-9(),!?\'\`]", " ", string)  # note: without the leading ^, every character inside the brackets would be replaced with a space instead
    string = re.sub(r"\'s", " \'s", string) # split off "'s", e.g. family's becomes family 's
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string) # isn't becomes is n't
    string = re.sub(r"\'re", " \'re", string) # you're becomes you 're
    string = re.sub(r"\'d", " \'d", string) # i'd becomes i 'd
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string) # \s matches whitespace; collapse two or more whitespace characters into a single space

    return string.strip().lower()
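
A quick sanity check of clean_str on a hypothetical sentence (this snippet is not part of the file, just an illustration of how contractions and punctuation are split into separate tokens):

# Example with a made-up input sentence:
print(clean_str("I haven't seen it, but Denny's code works!"))
# -> i have n't seen it , but denny 's code works !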


def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
    positive_examples = [s.strip() for s in positive_examples]  # strip trailing newlines
    negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples  # concatenating with + puts the result in a new list (unlike extend, which appends to the existing list in place)
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)  # np.concatenate joins arrays; it takes a tuple or list of arrays
    return [x_text, y]

#  called from train.py as: batches = data_helpers.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size (64), FLAGS.num_epochs (200))
def batch_iter(data, batch_size, num_epochs, shuffle=True): # yields one batch of data at a time
    """
    Generates batches of samples from data (feature/label pairs), given batch_size and num_epochs.
    """
    data = np.array(data)  # [(text0,label0),(text1,label1)..]
    # note: data is a list whose elements have a consistent type, so it is stored as an np.array to save memory
    # type(data) returns the type of data itself; data.dtype returns the type of the elements inside the array
    data_size = len(data)
    num_batches_per_epoch = int((len(data)-1)/batch_size) + 1  # number of batches the full dataset is split into per epoch
    for epoch in range(num_epochs):  # reshuffle the data at the start of every epoch
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))  # shuffled indices
            # note the difference between shuffle and permutation: both reorder an array, but shuffle works in place
            # (it reorders the original array and returns nothing), while permutation leaves the original array untouched and returns a new, shuffled copy
            shuffled_data = data[shuffle_indices]  # shuffled data
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):  # compute the start and end index of each batch
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
            # yield returns one batch of data per loop iteration, so memory usage stays constant
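
As a quick illustration (separate from the file, using toy data), the generator walks through the data in batches of batch_size and starts over for each epoch:

# Toy example: 10 samples, batch_size=4, 2 epochs -> batch sizes 4, 4, 2 per epoch
toy_data = list(zip(range(10), [0] * 10))
for i, batch in enumerate(batch_iter(toy_data, batch_size=4, num_epochs=2, shuffle=False)):
    x_batch, y_batch = zip(*batch)
    print(i, len(x_batch))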

# ============ separator ==================
# Return format: x_corpus is a list whose elements are text strings (one per document),
# y_label is a list of the corresponding labels in one-hot form, e.g. [[1,0,0,0],[0,1,0,0]] holds two labels
# (a usage sketch follows after the helper functions below)
def load_data_and_labels_3_27(corpus_path,stop_list_Path):
    x_corpus = []
    y_label = []
    stopwords = stopwordsList(stop_list_Path)  # build the stop-word list once, instead of once per file
    cate_dir = os.listdir(corpus_path) # get the category sub-directories
    # [女性 (women), 体育 (sports), 文学 (literature), 校园 (campus)]
    for cate in cate_dir:
        if cate == '女性':
            label = [1,0,0,0]
        elif cate =='体育':
            label = [0, 1, 0, 0]
        elif cate =='文学':
            label = [0, 0, 1, 0]
        else:
            label = [0,0,0,1]

        cate_complete_dir = corpus_path+'\\'+cate+"\\" # full path of the category directory
        file_dir = os.listdir(cate_complete_dir) # files under this category
        for file in file_dir:
            file_complete_dir = cate_complete_dir+file # full path of each file in the category
            content = readfile(file_complete_dir) # returns the whole text of one document
            # clean the text: drop newlines and surrounding whitespace
            content = content.replace("\n",'').strip()
            content_seg = jieba.cut(content)
            # drop stop words and join the remaining words with spaces
            outstr =''
            for word in content_seg:
                if word not in stopwords:
                    if word !='\t':
                        outstr+=word
                        outstr+=" "
            text = outstr.strip()  # note: ' '.join(outstr) would insert a space between every single character, which is not what we want
            x_corpus.append(text)
            y_label.append(label)
    return [x_corpus,y_label]

def stopwordsList(stop_list_Path):
    f = open(stop_list_Path,'r',encoding='utf-8')
    stopwords = [line.strip() for line in f.readlines()]
    return stopwords

def readfile(filepath):
    f = open(filepath,'r',encoding='gb2312',errors='ignore')
    content = f.read()
    # read() returns the whole file as one string; readline() returns a single line as a string; readlines() reads all lines into a list
    f.close()
    return content
    # the whole text is returned here so it can be word-segmented later
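
A minimal usage sketch of the loader above (the paths are placeholders; point them at your local copy of the training directory and stop-word file):

# Hypothetical usage of load_data_and_labels_3_27
corpus_path = r'data\text_classification_data\train'            # directory containing the 女性/体育/文学/校园 sub-folders
stop_path = r'data\text_classification_data\stop\stopword.txt'  # stop-word list, one word per line
x_corpus, y_label = load_data_and_labels_3_27(corpus_path, stop_path)
print(len(x_corpus), len(y_label))   # one space-separated, segmented string and one one-hot label per document
print(y_label[0])                    # e.g. [1, 0, 0, 0]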

The text_cnn.py file

import tensorflow as tf
import numpy as np

# Network architecture:
# an embedding layer + a convolution layer (ReLU) + a max-pooling layer + a softmax layer
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
      self, sequence_length, num_classes, vocab_size,
      embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # sequence_length: maximum sentence length; shorter sentences are padded up to it
        # num_classes: number of output classes
        # vocab_size: vocabulary size
        # embedding_size: dimensionality of the word vectors
        # filter_sizes: filter heights, written as 3, 4, 5 for short; the actual filters are filter_size * embedding_size
        # num_filters: number of filters per filter size
        # l2_reg_lambda=0.0: L2 regularization strength

        # Placeholders for input, output and dropout
        # the sentence matrix: one row per sample (the batch dimension is flexible), sequence_length columns (fixed sentence width)
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x") # input of shape batch_size * sequence_length; each document is one sample
        # the corresponding labels: flexible batch dimension, num_classes columns
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # self.W can be seen as the word-vector dictionary: vocab_size vectors of size embedding_size, randomly initialized in [-1, 1]
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),name="W") # randomly initialized embedding matrix
            # input_x is a sequence of word ids
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x) # looks up the rows of a tensor by index
            # first argument: the embedding tensor; second argument: the ids to look up
            # embedded_chars is the word-vector representation of input_x, with shape [batch_size, sequence_length, embedding_size]
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
            # tf.expand_dims(input, dim, name=None) adds a dimension; conv2d expects 4-D input, so the word vectors get an extra channel dimension,
            # giving shape [batch_size, sequence_length, embedding_size, 1], ready for convolution

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = [] # a Python list that will hold one 4-D tensor per filter size
        for i, filter_size in enumerate(filter_sizes): # iterate over the filter sizes and their indices
            with tf.name_scope("conv-maxpool-%s" % filter_size): # one name scope per filter size
                # input:        batch_size (number of sentences) * sequence_length (fixed sentence length) * embedding_size (width) * 1 (input channels)
                # filter shape: filter_size (filter height), embedding_size (filter width = word-vector dimension), 1 (input channels), num_filters (output channels)
                # conv output:  batch_size * (sequence_length-filter_size+1) * 1 * num_filters
                # pool window:               (sequence_length-filter_size+1) * 1
                # pool output:  batch_size * 1 * 1 * num_filters   (see the worked shape example after this file)

                # Convolution Layer
                # num_filters filters (output channels) of size filter_size * embedding_size, with 1 input channel
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                # filter height, filter width, input channels, output channels (number of filters)
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded, # input, filter weights, strides, padding scheme (no zero-padding)
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID", # VALID gives a narrow convolution, SAME keeps the output the same length as the input
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                # pooled has shape [batch_size, 1, 1, num_filters]: one max value per filter for each sample.
                # e.g. with batch_size=3 and num_filters=2, pooled is conceptually [[[[1,2]]], [[[3,4]]], [[[5,6]]]],
                # where each innermost value is the single feature produced by one filter

                pooled_outputs.append(pooled)  # each sample contributes num_filters features per filter size
                # pooled_outputs ends up as a list of len(filter_sizes) tensors,
                # each of shape [batch, 1, 1, num_filters]; they are concatenated along the channel axis below

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # concatenate the features from the different filter sizes along the last (channel) axis, giving shape [batch_size, 1, 1, num_filters_total]

        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # flatten into 2-D: shape [batch_size, num_filters_total]

        # Add dropout
        with tf.name_scope("dropout"): # dropout层,对池化后的结果做dropout
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)


        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores") # matmul(x, weights) + biases
            self.predictions = tf.argmax(self.scores, 1, name="predictions") # index of the highest score, i.e. the predicted class

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            #losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.scores,labels=tf.argmax(self.input_y))
            # cross-entropy loss
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
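
To make the shape comments inside the class concrete, here is a small arithmetic sketch (separate from the file) using hypothetical settings of sequence_length=56 and batch_size=64 together with the default hyper-parameters from train.py:

# Worked shape example (sequence_length and batch_size are made up for illustration)
sequence_length, embedding_size, batch_size = 56, 128, 64
filter_sizes, num_filters = [3, 4, 5], 128

print("embedded_chars_expanded: [%d, %d, %d, 1]" % (batch_size, sequence_length, embedding_size))
for filter_size in filter_sizes:
    conv_h = sequence_length - filter_size + 1      # VALID convolution: no zero-padding
    print("conv (size %d):   [%d, %d, 1, %d]" % (filter_size, batch_size, conv_h, num_filters))
    print("pooled (size %d): [%d, 1, 1, %d]" % (filter_size, batch_size, num_filters))

num_filters_total = num_filters * len(filter_sizes)  # 3 * 128 = 384
print("h_pool:      [%d, 1, 1, %d]" % (batch_size, num_filters_total))
print("h_pool_flat: [%d, %d]" % (batch_size, num_filters_total))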

The train.py file

#! /usr/bin/env python
# 网络训练
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn

# Command-line parameters
# ==================================================
# Vocabulary Size: 3658
# Train/Dev split: 2976/330

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", 0.1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

tf.flags.DEFINE_string("corpus_train_path",r'I:\代码\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\train',"训练集路径")
tf.flags.DEFINE_string("stop_list_Path",r'I:\代码\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\stop\stopword.txt','停用词路径')



# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
# evaluate on the dev set every 100 training steps (batches)
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
# save a checkpoint (the model parameters) every 100 training steps
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
#FLAGS.flag_values_dict()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# Data preprocessing: build the vocabulary, turn each text into a sequence of word ids, shuffle the data, and split it into training and validation sets (cross-validation would also be possible)
def preprocess():
    # Data Preparation
    # ==================================================

    # Load data
    print("Loading data...")
    x_text, y = data_helpers.load_data_and_labels_3_27(FLAGS.corpus_train_path,FLAGS.stop_list_Path)
    # x_text is the training corpus as a list of preprocessed text strings
    # y holds the corresponding labels, one one-hot list per text

    # Build vocabulary. Word ids start at 1; id 0 is reserved for unseen words and padding (see the sketch after the preprocess function)
    max_document_length = max([len(x.split(" ")) for x in x_text]) # length of the longest text, measured in words
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
    # build the vocabulary: every word in the corpus is assigned an id
    x = np.array(list(vocab_processor.fit_transform(x_text)))
    # represent each text as a sequence of word ids; x is a numpy.ndarray that looks like [[1 2 3 0] [4 5 0 0]]

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = np.array(y)[shuffle_indices]

    # Split train/test set
    # TODO: This is very crude, should use cross-validation
    # this is only a simple split; for a small dataset, cross-validation should be used
    dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
    x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]

    del x, y, x_shuffled, y_shuffled

    # print the vocabulary size and the sizes of the training and validation sets
    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_train, y_train, vocab_processor, x_dev, y_dev
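
As referenced above, here is a minimal sketch (separate from the file, using toy sentences and the tf.contrib.learn API already imported here) of what VocabularyProcessor does:

# Toy example: every text is mapped to a fixed-length sequence of word ids, padded with 0
docs = ["the cat sat", "the dog"]
vp = learn.preprocessing.VocabularyProcessor(max_document_length=4)
ids = np.array(list(vp.fit_transform(docs)))
print(ids)                   # e.g. [[1 2 3 0] [1 4 0 0]]; 0 is the padding / unknown id
print(len(vp.vocabulary_))   # vocabulary size, including the reserved id 0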

def train(x_train, y_train, vocab_processor, x_dev, y_dev):
    # Training
    # ==================================================

    with tf.Graph().as_default():
        # tf.ConfigProto configures the session when it is created
        session_conf = tf.ConfigProto(
          allow_soft_placement=FLAGS.allow_soft_placement, # automatically fall back to another device if the requested one is unavailable
          log_device_placement=FLAGS.log_device_placement) # log which device each op is placed on
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = TextCNN(
                sequence_length=x_train.shape[1],
                num_classes=y_train.shape[1],
                vocab_size=len(vocab_processor.vocabulary_),
                embedding_size=FLAGS.embedding_dim,
                filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
                num_filters=FLAGS.num_filters,
                l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            # starts at 0 and is incremented automatically after every parameter update
            optimizer = tf.train.AdamOptimizer(1e-3)
            # compute the gradients; returns a list of (gradient, variable) tuples
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            # update the variables using the gradients
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
            # note: passing global_step=global_step makes the step counter increase by 1 on every parameter update
            #train_op = optimizer.minimize(cnn.loss,global_step=global_step)  # equivalent to the two steps above; keeping them separate makes it easy to constrain the gradients

            # Keep track of gradient values and sparsity (optional), for visualization
            # tf.summary.histogram() records the distribution of a tensor during training and shows it on the TensorBoard histogram dashboard
            grad_summaries = []
            for g, v in grads_and_vars:  # list of (gradient, variable) tuples; g: gradient, v: variable
                if g is not None:
                    grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)

            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
            print("Writing to {}\n".format(out_dir))

            # The tf.summary methods record the training process and parameter distributions so they can be displayed in TensorBoard.

            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar("loss", cnn.loss)
            acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

            # Train Summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

            # Write vocabulary
            vocab_processor.save(os.path.join(out_dir, "vocab"))

            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            # a helper function that runs one training step on a single batch
            def train_step(x_batch, y_batch):
                """
                A single training step
                """
                feed_dict = {
                  cnn.input_x: x_batch,
                  cnn.input_y: y_batch,
                  cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
                }
                _, step, summaries, loss, accuracy = sess.run(
                    [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                # one sess.run call: update the parameters, increment the step, record the summaries, and compute the batch loss and accuracy
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                # note: step counts parameter updates (one per batch), not epochs
                # total number of steps = batches per epoch (number of texts / batch size) * number of epochs (200)
                # ceil(2976/64) = ceil(46.5) = 47 batches per epoch, 47 * 200 = 9400 steps
                train_summary_writer.add_summary(summaries, step)

            # a helper function that evaluates the model on a batch of dev data (below it is called with the whole dev set)
            def dev_step(x_batch, y_batch, writer=None):
                """
                Evaluates model on a dev set
                """
                feed_dict = {
                  cnn.input_x: x_batch,
                  cnn.input_y: y_batch,
                  cnn.dropout_keep_prob: 1.0
                }
                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                if writer:
                    writer.add_summary(summaries, step)


            # Generate batches: batch_iter returns a generator that yields one batch at a time, rather than building a list [batch1, batch2, batch3, ...]

            batches = data_helpers.batch_iter(
                list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
            # num_epochs controls how long this loop runs
            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    dev_step(x_dev, y_dev, writer=dev_summary_writer)
                    print("")
                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))

def main(argv=None):
    x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()
    train(x_train, y_train, vocab_processor, x_dev, y_dev)

if __name__ == '__main__':
    tf.app.run()

The eval.py file

#! /usr/bin/env python
# Prediction and evaluation
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import csv

# Parameters
# ==================================================

# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

tf.flags.DEFINE_string("Test_corpus",r"I:\代码\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\data\text_classification_data\test","测试语料集")
tf.flags.DEFINE_string("stop_list_Path",r'I:\代码\tensorflow\文本分类\分类数据\text_classification\stop\stopword.txt','停用词路径')

# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", r"I:\代码\tensorflow\cnn-text-classification-tf-master\cnn-text-classification-tf-master\runs\1553692717\checkpoints", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", True, "Evaluate on all training data")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")


FLAGS = tf.flags.FLAGS
#FLAGS._parse_flags()
# FLAGS.flag_values_dict()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# CHANGE THIS: Load data. Load your own data here
# when eval_train is True, the if branch below loads our own test set
if FLAGS.eval_train:
    #x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
    x_raw, y_test = data_helpers.load_data_and_labels_3_27(FLAGS.Test_corpus,FLAGS.stop_list_Path)
    y_test = np.argmax(y_test, axis=1)  # turn the one-hot labels into class indices 0,1,2,3, corresponding to 女性, 体育, 文学, 校园
else:
    x_raw = ["a masterpiece four years in the making", "everything is off."]
    y_test = [1, 0]
    #y_test = [2, 0]

# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab") # .. refers to the parent directory
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        # input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Generate batches for one epoch
        batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []

        for x_test_batch in batches:
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])

# Print accuracy if y_test is defined
if y_test is not None:
    correct_predictions = float(sum(all_predictions == y_test))
    print("Total number of test examples: {}".format(len(y_test)))
    print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))

# Save the evaluation to a csv
# columns: the segmented input text, the predicted class, the true class, and bo (True if the prediction is correct, False otherwise)
bo = []
for i in range(len(y_test)):
    if all_predictions[i]==y_test[i]:
        bo.append("True")
        #bo.append(" ")
    else:
        bo.append("False")  # use a string here as well, so the column has a consistent type
#predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions,y_test,bo))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w',encoding='utf-8') as f:
    csv.writer(f).writerows(predictions_human_readable)
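
Optionally, the same arrays can be broken down per class. A minimal sketch, assuming the label order used in data_helpers.load_data_and_labels_3_27 (0=女性, 1=体育, 2=文学, 3=校园):

# Hypothetical extension: per-class accuracy computed from all_predictions and y_test
class_names = ['女性', '体育', '文学', '校园']
y_test_arr = np.array(y_test)
for c, name in enumerate(class_names):
    mask = (y_test_arr == c)
    if mask.sum() > 0:
        class_acc = float((all_predictions[mask] == c).sum()) / mask.sum()
        print("{}: {:g}".format(name, class_acc))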
