MNIST Handwritten Digit Recognition

Handwritten digit recognition is the classic introductory exercise; we use it here to walk through the basics of deep learning and of TensorFlow.

1. Forward pass: mnist_inference.py

#coding:utf-8

import tensorflow as tf

# Parameters describing the network structure
INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

# Size and depth of the first convolutional layer
CONV1_SIZE = 5
CONV1_DEEP = 32

# Size and depth of the second convolutional layer
CONV2_SIZE = 5
CONV2_DEEP = 64

# Number of nodes in the fully connected layer
FC_SIZE = 512


def inference(input_tensor, train, regularizer):
    with tf.variable_scope("layer1-conv1"):
        conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        # SAME padding with stride 1 keeps the spatial size: 28x28x1 -> 28x28x32
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding="SAME")
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))



    with tf.variable_scope("layer2-pool1"):
        pool1 = tf.nn.pool(relu1,ksize = [1,2,2,1],strides=[1,2,2,1],padding = "SAME")


    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight",[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias",[CONV2_DEEP],initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(relu1,conv2_weights,[1,1,1,1],padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))


    with tf.variable_scope("layer4-pool2"):
        pool2 = tf.nn.pool(relu2,kszie=[1,2,2,1],strides=[1,2,2,1],padding="SAME")



    # Flatten the 7x7x64 feature map into a vector for the fully connected layers
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]

    # -1 keeps the batch dimension flexible, so the graph also works with a [None, ...] placeholder
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope("layer5-fc1"):
        fc1_weights = tf.get_variable("weight",[nodes,FC_SIZE],initializer=tf.truncated_normal_initializer(stddev=0.1))



    if regularizer !=None:

        tf.add_to_collection("losses",regularizer(fc1_weights))

        fc1_biases = tf.get_variable("bias",[FC_SIZE],initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshape,fc1_weights)+fc1_biases)
        #一般只在全連接層進行dropout操作,而不在卷積層或者池化層
        if train:
            fc1 = tf.nn.dropout(fc1,0.7)

    with tf.variable_scope("layer6-fc2"):

        fc2_weights = tf.get_variable("weight",[FC_SIZE,NUM_LABELS],initializer=tf.truncated_normal_initializer(stddev=0.1))

        if regularizer != None:
            tf.add_to_collection("losses",regularizer(fc2_weights))

        fc2_biases=tf.get_variable("bias",[NUM_LABELS],initializer=tf.constant_initializer(0.0))

        logit = tf.matmul(fc1,fc2_weights)+fc2_biases

    return logit
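
To check that the graph wires up correctly, a quick shape test helps (my own sketch, not part of the original scripts; it assumes mnist_inference.py is importable from the current directory):

#coding:utf-8
import numpy as np
import tensorflow as tf

import mnist_inference

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
    logits = mnist_inference.inference(x, False, None)  # no dropout, no regularization
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy = np.zeros((4, 28, 28, 1), dtype=np.float32)  # a fake batch of 4 images
        print(sess.run(logits, feed_dict={x: dummy}).shape)  # expected: (4, 10)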

2. Training: mnist_train.py. Backpropagation is computed automatically by TensorFlow; this script also covers saving the model.

#coding:utf-8

import os

import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data

# Load the forward pass defined above
import mnist_inference



# Network hyper-parameters
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8        # base learning rate for the exponential decay
LEARNING_RATE_DECAY = 0.99      # decay rate
REGULARIZATION_RATE = 0.0001    # weight of the L2 regularization term
TRAIN_STEP = 30000
MOVING_AVERAGE_DECAY = 0.99     # moving-average decay rate

MODEL_SAVE_PATH = "./model"
MODEL_NAME = "model.ckpt"

def train(mnist):
    # The conv net consumes 4-D input: [batch, height, width, channels]
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.IMAGE_SIZE,
        mnist_inference.NUM_CHANNELS],
        name="x-input")

    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference(x, True, regularizer)

    global_step = tf.Variable(0, trainable=False)  # global_step is bookkeeping only, so it is marked non-trainable

    # Maintain a moving average of the weights and biases
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)  # takes the decay rate and the step counter
    variable_average_op = variable_average.apply(tf.trainable_variables())  # keep a shadow average for every trainable variable

    # Loss: cross entropy plus the regularization terms gathered in the "losses" collection
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))  # the weight regularization terms were added to the collection in mnist_inference.py

    # Exponentially decay the learning rate:
    # decayed_lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ^ (global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)

    # Training op; minimize() increments global_step by 1 on every run
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Group the two ops so a single run both updates the parameters via backprop and refreshes their shadow moving averages
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name="train")


    # Saver used to write checkpoints; make sure the target directory exists
    if not os.path.exists(MODEL_SAVE_PATH):
        os.makedirs(MODEL_SAVE_PATH)
    saver = tf.train.Saver()

    # Run the training loop
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAIN_STEP):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE,
                                          mnist_inference.IMAGE_SIZE,
                                          mnist_inference.IMAGE_SIZE,
                                          mnist_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            # Report and save the model every 1000 steps
            if i % 1000 == 0:
                print("step %d, loss %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data",one_hot=True)
    train(mnist)

if __name__ =="__main__":
    tf.app.run()
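
Each call to saver.save() leaves numbered checkpoint files under ./model (model.ckpt-1, model.ckpt-1001, ...) plus a bookkeeping file named checkpoint. A small sketch of mine to inspect what was written (not part of the original scripts):

#coding:utf-8
import tensorflow as tf

# get_checkpoint_state reads the "checkpoint" bookkeeping file in the directory
ckpt = tf.train.get_checkpoint_state("./model")
if ckpt:
    print(ckpt.model_checkpoint_path)        # the newest checkpoint, e.g. ./model/model.ckpt-29001
    print(ckpt.all_model_checkpoint_paths)   # the recent checkpoints the Saver kept (5 by default)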

3. Model evaluation: this covers loading the saved model.

#coding:utf-8
import time

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# Re-evaluate the newest checkpoint every 10 seconds
EVAL_INTERVAL_TRAIN = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Same 4-D input format as in training, but with a flexible batch dimension
        x = tf.placeholder(tf.float32, [None,
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.NUM_CHANNELS], name="x-input")
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")

        reshaped_images = np.reshape(mnist.validation.images, (-1,
                                                               mnist_inference.IMAGE_SIZE,
                                                               mnist_inference.IMAGE_SIZE,
                                                               mnist_inference.NUM_CHANNELS))
        validfeed = {x: reshaped_images, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, False, None)  # forward pass; no regularization and no dropout at evaluation time

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the shadow (moving-average) values instead of the raw variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)  # automatically finds the newest checkpoint in the directory

                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)

                    # Recover the training step count from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validfeed)

                    print("step %s, validation accuracy %g" % (global_step, accuracy_score))
                else:
                    print("no checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_TRAIN)


def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data",one_hot=True)
    evaluate(mnist)


if __name__ =='__main__':
    tf.app.run()
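
The variables_to_restore() call above is what makes the Saver read each weight from its moving-average shadow in the checkpoint. A tiny illustration of mine of what it returns (plain TF 1.x, nothing from the tutorial assumed):

#coding:utf-8
import tensorflow as tf

v = tf.Variable(0.0, name="v")
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_op = ema.apply([v])  # creates the shadow variable v/ExponentialMovingAverage

# Maps checkpoint names to graph variables, so v is loaded from its shadow value
print(ema.variables_to_restore())
# e.g. {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}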

In practice, training and evaluation (testing) are not run as separate one-off jobs: typically you train for a certain number of steps, evaluate the model once, train some more, then save a checkpoint, and repeat, as in the sketch below.
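
A minimal sketch of that interleaved pattern (my own example; it uses a plain softmax model instead of the tutorial's conv net to stay short, and /tmp/interleaved_model is a hypothetical save path):

#coding:utf-8
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, w) + b

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))

save_dir = "/tmp/interleaved_model"  # hypothetical location
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3000):
        xs, ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: xs, y_: ys})
        if i % 1000 == 0:
            # train for a while, evaluate once, then checkpoint -- all in one process
            acc = sess.run(accuracy, feed_dict={x: mnist.validation.images, y_: mnist.validation.labels})
            print("step %d, validation accuracy %g" % (i, acc))
            saver.save(sess, os.path.join(save_dir, "model.ckpt"), global_step=i)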
