mnist_inference.py
#coding:utf-8
import tensorflow as tf

# Parameters that describe the network structure.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Obtain a weight variable through tf.get_variable.
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is given, add the regularization loss of the current
    # variable to the collection named 'losses'.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# Forward propagation of the network.
def inference(input_tensor, regularizer):
    # Declare the variables of the first layer and compute its forward pass.
    with tf.variable_scope('layer1'):
        # There is no essential difference between tf.get_variable and
        # tf.Variable here, because this function is not called more than once
        # in the same program during training or testing. If it were called
        # multiple times in one program, reuse would need to be set to True
        # after the first call (see the sketch after this file).
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # Similarly, declare the variables of the second layer and compute its
    # forward pass.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    # Return the result of the forward pass.
    return layer2
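
The comment in layer1 mentions that calling inference a second time in the same program would require variable reuse. A minimal sketch of what that looks like (not part of the book's code; it assumes the TF 1.x variable_scope API):

#coding:utf-8
# Sketch: building a second forward pass that shares the variables created by
# the first call, e.g. to feed a second input tensor through the same network.
import tensorflow as tf
import mnist_inference

x1 = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
x2 = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
y1 = mnist_inference.inference(x1, None)
# The second call must set reuse=True so that tf.get_variable returns the
# variables created above instead of raising an error. reuse is inherited by
# the 'layer1' and 'layer2' sub-scopes opened inside inference.
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    y2 = mnist_inference.inference(x2, None)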
mnist_train.py
#coding:utf-8
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and the forward-propagation function defined in
# mnist_inference.py.
import mnist_inference

# Network configuration parameters.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name under which the model is saved.
MODEL_SAVE_PATH = "model/"
MODEL_NAME = "model.ckpt"

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(
        tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(
        tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward propagation defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op and the
    # training op.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # The model is no longer evaluated on the validation set during
        # training; validation and testing are handled by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Save the model every 1000 steps.
            if i % 1000 == 0:
                # Report training progress. Only the loss on the current
                # training batch is printed here, which gives a rough picture
                # of how training is going.
                print("After %d training step(s), loss on training batch "
                      "is %g." % (step, loss_value))
                # Save the current model. Passing global_step appends the
                # current step count to the file name of each saved model.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
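
For reference, with staircase left at its default of False, tf.train.exponential_decay computes lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps). A minimal plain-Python sketch of the schedule used above (not part of the book's code; it assumes the standard 55000-example MNIST training split):

# Sketch: the continuous exponential-decay schedule reproduced in plain Python.
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
DECAY_STEPS = 55000 / 100.0  # mnist.train.num_examples / BATCH_SIZE

def decayed_lr(global_step):
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / DECAY_STEPS)

for step in (0, 550, 5500, 30000):
    print("step %5d -> learning rate %.4f" % (step, decayed_lr(step)))
# Roughly: 0.8000 at step 0, 0.7920 after one epoch (550 steps),
# 0.7235 after ten epochs, and about 0.4624 at step 30000.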
mnist_eval.py
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and functions defined in mnist_inference.py and
# mnist_train.py.
import mnist_inference
import mnist_train

# Load the latest model every ten seconds and measure its accuracy on the
# validation data.
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output formats.
        x = tf.placeholder(
            tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(
            tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images,
                         y_: mnist.validation.labels}
        # Compute the forward pass by calling the shared function directly.
        # Because the regularization loss is irrelevant at test time, the
        # regularizer is set to None here.
        y = mnist_inference.inference(x, None)
        # Use the forward-pass result to compute accuracy. To classify unseen
        # examples, tf.argmax(y, 1) yields the predicted class of each input.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        # Cast the boolean predictions to float before averaging.
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so the forward pass does
        # not need to call the moving-average function to obtain the averages.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Run the accuracy computation every EVAL_INTERVAL_SECS seconds to
        # track how the accuracy changes during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state finds the file name of the
                # latest model in the directory via the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the step count at which the model was saved
                    # from its file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy "
                          "= %g" % (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
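
The key to this loading scheme is variables_to_restore, which maps each averaged variable's moving-average shadow name back to the variable itself (variables without an average keep their own names). A minimal sketch of the mechanism (not part of the book's code):

#coding:utf-8
# Sketch: variables_to_restore builds a {restore_name: variable} dict, so a
# Saver constructed from it loads the averaged values into the variables.
import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_op = ema.apply([v])
# Prints something like {'v/ExponentialMovingAverage': <tf.Variable 'v:0'>}.
print(ema.variables_to_restore())
saver = tf.train.Saver(ema.variables_to_restore())
# Restoring with this saver assigns the checkpointed shadow value of v to v
# itself, so the forward pass uses the moving averages without extra ops.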
The code is from the book "TensorFlow實戰Google深度學習框架" (TensorFlow: Google's Deep Learning Framework in Practice).