Common models for NLP classification (Part 1): 1-D convolution tf.layers.conv1d() + fully connected tf.layers.dense()
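
A minimal standalone sketch (not part of the model below) of how tf.layers.conv1d() and tf.layers.dense() transform tensor shapes; the batch size, sequence length, embedding dimension, filter count and unit counts here are illustrative assumptions only.

import tensorflow as tf

# [batch, seq_len, embedding_dim]; sizes are only for illustration
inputs = tf.placeholder(tf.float32, [None, 600, 64])
# 1-D convolution: 256 filters of width 5, "valid" padding -> [batch, 600 - 5 + 1, 256]
conv = tf.layers.conv1d(inputs, filters=256, kernel_size=5)
# global max pooling over the sequence dimension -> [batch, 256]
gmp = tf.reduce_max(conv, axis=1)
# fully connected projection -> [batch, 128]
fc = tf.layers.dense(gmp, units=128)

print(conv.shape, gmp.shape, fc.shape)  # (?, 596, 256) (?, 256) (?, 128)

The complete model is below.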

import tensorflow as tf


class TextCNN(object):
    """CNN model for text classification."""

    def __init__(self):
        # Three input placeholders
        self.x = tf.placeholder(tf.int32, [None, config.max_seq_length], name='input_x')
        self.y = tf.placeholder(tf.float32, [None, config.lables], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.cnn()

    def cnn(self):
        """CNN模型"""
        # 詞向量映射
        with tf.device('/cpu:0'):
        # with tf.name_scope("emd"):
            embedding = tf.get_variable('embedding', [config.vocab_size, config.embedding_dim])
            embedding_inputs = tf.nn.embedding_lookup(embedding, self.y)

        with tf.name_scope("cnn"):
            # CNN layer
            conv = tf.layers.conv1d(embedding_inputs, config.num_filters, config.kernel_size, name='conv')
            # global max pooling layer
            gmp = tf.reduce_max(conv, axis=1, name='gmp')

        with tf.name_scope("score"):
            # Fully connected layer, followed by dropout and ReLU activation
            fc = tf.layers.dense(gmp, config.conn_hidden_dim_1, name='fc1')
            fc = tf.contrib.layers.dropout(fc, self.keep_prob)
            fc = tf.nn.relu(fc)

            # Classifier
            self.logits = tf.layers.dense(fc, config.lables, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class

        with tf.name_scope("optimize"):
            # Loss: cross entropy
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
            self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
            self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)

        with tf.name_scope("accuracy"):
            # Accuracy
            correct_pred = tf.equal(tf.argmax(self.y, 1), self.y_pred_cls)
            self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
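
The class above reads its hyperparameters from a module-level config object. The container below is only a sketch: the attribute names mirror what the class references, while every value, the TCNNConfig name, and the random dummy batch are illustrative assumptions.

import numpy as np

class TCNNConfig(object):
    """Hypothetical hyperparameter container; names match the class above, values are illustrative."""
    max_seq_length = 600     # padded sequence length
    lables = 10              # number of classes (spelling kept to match the class above)
    vocab_size = 5000        # vocabulary size
    embedding_dim = 64       # word embedding dimension
    num_filters = 256        # number of conv1d filters
    kernel_size = 5          # conv1d window width
    conn_hidden_dim_1 = 128  # units of the first fully connected layer
    learning_rate = 1e-3     # Adam learning rate

config = TCNNConfig()
model = TextCNN()

# Random dummy batch, just to smoke-test one training step
x_batch = np.random.randint(0, config.vocab_size, size=(32, config.max_seq_length))
y_batch = np.eye(config.lables)[np.random.randint(0, config.lables, size=32)]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed_dict = {model.x: x_batch, model.y: y_batch, model.keep_prob: 0.5}
    _, loss, acc = sess.run([model.train_op, model.loss, model.accuracy], feed_dict=feed_dict)
    print(loss, acc)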