Using the classic convolutional network LeNet-5 to solve the MNIST digit-recognition problem (mainly addressing low accuracy on the validation set)

The LeNet-5 model itself is not the point here; the point is the problem I ran into at the time. Have you hit it too? Is the accuracy you get from training far below what books or normal results report? My validation accuracy was only 0.1 at the time, while the book I was following (《TensorFlow實戰Google深度學習框架》) reports 0.99!

Solution: I searched for the cause online at the time. The blog post at https://blog.csdn.net/wangdong2017/article/details/90176323 explains it in detail, but it did not solve my problem. My fix was to tune the learning rate, a simple and crude approach: change the base learning rate in mnist_train.py to LEARNING_RATE_BASE = 0.01 and everything works.
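
To see why the base learning rate matters so much here, note that tf.train.exponential_decay (with its default staircase=False) computes base * decay^(global_step / decay_steps). Below is a quick back-of-the-envelope sketch of my own (not from the book; the 0.8 is just a hypothetical oversized base rate for comparison):

# My own illustration, not part of the project code.
# decay_steps mirrors mnist.train.num_examples / BATCH_SIZE = 55000 / 100.
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000.0 / 100

for base in (0.01, 0.8):  # 0.8 = hypothetical oversized base rate
    for step in (0, 1000, 10000, 30000):
        lr = base * LEARNING_RATE_DECAY ** (step / decay_steps)
        print('base=%.2f  step=%5d  lr=%.6f' % (base, step, lr))

Whatever the decay schedule does, an oversized base rate stays oversized by the same constant factor for the entire run, so gradient descent keeps overshooting; that is consistent with the ~0.1 validation accuracy I saw, which is exactly random guessing over ten classes.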

This model is quite old, so I'll go straight to the code!

Code in mnist_inference.py:

import tensorflow as tf

# Parameters of the network
INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

# Size and depth of the first convolutional layer
CONV1_DEEP = 32
CONV1_SIZE = 5
# Size and depth of the second convolutional layer
CONV2_DEEP = 64
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512

# Forward propagation of the convolutional network. The extra parameter
# `train` distinguishes the training pass from the test pass (dropout is
# applied only during training).
def inference(input_tensor,train,regularizer):
    # Declare the variables of the first convolutional layer and
    # implement its forward pass.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            'weight',[CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            'bias',[CONV1_DEEP],initializer=tf.constant_initializer(0.0))
        # 5x5 filter with depth 32, stride 1, zero ("SAME") padding
        conv1 = tf.nn.conv2d(
            input_tensor,conv1_weights,strides=[1,1,1,1],padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))

    # Forward pass of the second layer, a max-pooling layer:
    # 2x2 window, zero padding, stride 2 (28x28 -> 14x14).
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(
            relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # Declare the variables of the third layer (the second convolutional
    # layer) and implement its forward pass.
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            'weight',[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            'bias',[CONV2_DEEP],
            initializer=tf.constant_initializer(0.0))
        # 5x5 filter with depth 64, stride 1, zero padding
        conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1,1,1,1],padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))

    # The fourth layer is a pooling layer with the same structure as
    # the second one (14x14 -> 7x7).
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

    # The fifth layer is fully connected, so the 7x7x64 output of the
    # fourth layer has to be flattened into a vector first.
    pool_shape = pool2.get_shape().as_list()
    # pool_shape[0] is the number of examples in a batch;
    # nodes = 7 * 7 * 64 = 3136
    nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]
    # Flatten the fourth layer's output into a batch of vectors
    reshaped = tf.reshape(pool2,[pool_shape[0],nodes])

    # Declare the variables of the fifth layer (fully connected) and
    # implement its forward pass.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            'weight',[nodes,FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            'bias',[FC_SIZE],initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights) + fc1_biases)
        # Dropout is applied only during training
        if train:
            fc1 = tf.nn.dropout(fc1,0.5)

    # Declare the variables of the sixth layer (the output layer) and
    # implement its forward pass.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            'weight', [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            'bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))

        logit = tf.matmul(fc1,fc2_weights)+fc2_biases

    # Return the logits of the sixth layer
    return logit
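
As a quick sanity check (my own addition, not part of the book's code), feeding a dummy batch through inference() should yield a [batch, 10] logit tensor, with the flattened pool2 layer holding 7*7*64 = 3136 nodes:

import tensorflow as tf
import mnist_inference

with tf.Graph().as_default():
    # A single dummy 28x28 grayscale image
    x = tf.placeholder(tf.float32, [1, 28, 28, 1], name='x-input')
    # train=False disables dropout; no regularizer is needed here
    logits = mnist_inference.inference(x, False, None)
    print(logits.get_shape())  # expect (1, 10)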

Code in mnist_train.py:

import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data

# Load the constants and forward-propagation function defined in mnist_inference.py
import mnist_inference

# Configuration of the network
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99

# Path and file name for saving the model
MODEL_SAVE_PATH = 'model'
MODEL_NAME = 'model.ckpt'

def train(mnist):
    # Put all input processing under the 'input' name scope
    with tf.name_scope('input'):
        # Define the input and output placeholders
        x = tf.placeholder(tf.float32,
                           [BATCH_SIZE,
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.NUM_CHANNELS
                           ],
                           name='x-input')
        y_ = tf.placeholder(tf.float32,[None,mnist_inference.OUTPUT_NODE],name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 regularization
    # Use the forward-propagation process defined in mnist_inference
    y = mnist_inference.inference(x,True,regularizer)
    global_step = tf.Variable(0,trainable=False)

    # Define the loss function, learning rate, moving-average op, and training op.
    # Put the moving-average computations under the 'moving_average' name scope
    with tf.name_scope('moving_average'):
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY,global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Put the loss computation under the 'loss_function' name scope
    with tf.name_scope('loss_function'):
        # The labels are one-hot vectors, so tf.argmax converts them back
        # to class indices for the sparse cross-entropy op
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y,labels=tf.argmax(y_,1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        # Total loss = cross-entropy + the L2 regularization terms
        loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Put the remaining training ops under the 'train_step' name scope
    with tf.name_scope('train_step'):
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE,
            global_step,
            mnist.train.num_examples / BATCH_SIZE,
            LEARNING_RATE_DECAY
        )
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss,global_step=global_step)
        # Run the parameter update and the moving-average update together
        with tf.control_dependencies([train_step,variable_averages_op]):
            train_op = tf.no_op(name='train')

    # Initialize the TensorFlow Saver for model persistence
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs,ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs,(BATCH_SIZE,
                                         mnist_inference.IMAGE_SIZE,
                                         mnist_inference.IMAGE_SIZE,
                                         mnist_inference.NUM_CHANNELS
                                         ))
            _,loss_value,step = sess.run([train_op,loss,global_step],
                                         feed_dict={x:reshaped_xs,y_:ys})
            # Print the loss and save the model every 1000 steps
            if i%1000 == 0:
                print('After %d training step(s), loss on training batch is %g.'%(step,loss_value))
                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step=global_step)
    # Write the current computation graph to a TensorBoard log file
    writer = tf.summary.FileWriter('log',tf.get_default_graph())
    writer.close()

def main(argv = None):
    mnist = input_data.read_data_sets('mnist_data',one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

Code in mnist_eval.py:

import tensorflow as tf
import time
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Load the constants and forward-propagation function defined in mnist_inference.py
import mnist_inference
import mnist_train

# Load the latest model every 10 seconds and evaluate its accuracy on the
# validation set (used by the commented-out loop at the end of evaluate())
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output placeholders; here the batch size is
        # fixed to the size of the validation set
        x = tf.placeholder(tf.float32,
                           [mnist.validation.images.shape[0],
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.IMAGE_SIZE,
                            mnist_inference.NUM_CHANNELS],
                           name='x-input')
        y_ = tf.placeholder(tf.float32,[None,mnist_inference.OUTPUT_NODE],name='y-input')

        xs = mnist.validation.images
        reshaped_xs = np.reshape(xs,[mnist.validation.images.shape[0],
            mnist_inference.IMAGE_SIZE,
            mnist_inference.IMAGE_SIZE,
            mnist_inference.NUM_CHANNELS])

        validate_feed = {x:reshaped_xs,
                         y_:mnist.validation.labels}

        # Use the forward pass defined in mnist_inference. Passing None for
        # train disables dropout, and no regularizer is needed at eval time.
        y = mnist_inference.inference(x,None,None)
        # Compute the accuracy from the forward-pass result. To classify
        # unseen examples, tf.argmax(y,1) gives the predicted class directly.
        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

        # Restore the moving-average (shadow) values of the variables;
        # variables_to_restore() maps the shadow names to the variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Load the model
                saver.restore(sess,ckpt.model_checkpoint_path)
                # Recover the training step count from the checkpoint file name
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy,feed_dict=validate_feed)

                print('After %s training step(s), validation accuracy = %g.'%(global_step,accuracy_score))
            else:
                print('No checkpoint file found')
                return

        # To re-evaluate every EVAL_INTERVAL_SECS seconds while training is
        # still running, uncomment the loop below (it repeats the logic above):
        # while True:
        #     with tf.Session() as sess:
        #         ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
        #         if ckpt and ckpt.model_checkpoint_path:
        #             # Load the model
        #             saver.restore(sess,ckpt.model_checkpoint_path)
        #             # Recover the training step count from the checkpoint file name
        #             global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        #             accuracy_score = sess.run(accuracy,feed_dict=validate_feed)
        #
        #             print('After %s training step(s), validation accuracy = %g.'%(global_step,accuracy_score))
        #         else:
        #             print('No checkpoint file found')
        #             return
        #     time.sleep(EVAL_INTERVAL_SECS)

def main(argv = None):
    mnist = input_data.read_data_sets('mnist_data',one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
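
One detail in mnist_eval.py worth spelling out is variables_to_restore(): it returns a dictionary mapping the moving-average ("shadow") variable names to the model's variables, so saver.restore() loads the averaged values into the model instead of the raw trained values. A minimal sketch of my own (not from the book) showing the mapping:

import tensorflow as tf

with tf.Graph().as_default():
    v = tf.Variable(0.0, name='v')
    ema = tf.train.ExponentialMovingAverage(0.99)
    # Prints a dict like {'v/ExponentialMovingAverage': <the variable v>},
    # so restoring a checkpoint fills v with its shadow (averaged) value.
    print(ema.variables_to_restore())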
Below is my file structure:

Figure 1: File structure