本文使用tensorflow實現在mnist數據集上的圖片訓練和測試過程,使用了簡單的卷積神經網絡(兩個卷積層加兩個全連接層),代碼中涉及到的內容,均以備註的形式標出。
關於文中的數據集,大家如果沒有下載下來,可以到我的網盤去下載,鏈接如下:
https://pan.baidu.com/s/1KU_YZhouwk0h9MK0xVZ_QQ
下載下來後解壓到F盤mnist文件夾下,或者自己選擇文件存儲位置,然後在下面代碼的相應位置改過來即可。
直接上代碼:
import tensorflow as tf
import numpy as np
# Import the MNIST input helper (TF 1.x tutorial module; removed in TF 2.x).
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST from the given directory (downloads it there if absent).
# one_hot=True encodes each label as a 10-dimensional one-hot vector.
mnist = input_data.read_data_sets('F:/mnist/data/',one_hot = True)
# Images arrive as flattened 784-element float vectors (28*28 pixels);
# labels are the matching one-hot rows.
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print("MNIST LOAD READY")
# Input size: each 28*28 MNIST image is fed as a flat 784-vector.
n_input = 784
# Number of output classes (digits 0-9).
n_output = 10
# Weight variables, Gaussian-initialised with stddev 0.1.
weights = {
# Convolution kernels: [height, width, in_channels, out_channels].
'wc1':tf.Variable(tf.random_normal([3,3,1,64],stddev = 0.1)),
'wc2':tf.Variable(tf.random_normal([3,3,64,128],stddev=0.1)),
# Fully connected weights; 7*7*128 is the flattened feature-map size
# after two 2x2 max-poolings of a 28x28 input (28 -> 14 -> 7).
'wd1':tf.Variable(tf.random_normal([7*7*128,1024],stddev=0.1)),
'wd2':tf.Variable(tf.random_normal([1024,n_output],stddev=0.1))
}
# Bias variables, one per output channel/unit of the matching layer.
biases = {
'bc1':tf.Variable(tf.random_normal([64],stddev = 0.1)),
'bc2':tf.Variable(tf.random_normal([128],stddev=0.1)),
'bd1':tf.Variable(tf.random_normal([1024],stddev=0.1)),
'bd2':tf.Variable(tf.random_normal([n_output],stddev=0.1))
}
#定義前向傳播函數
def conv_basic(_input,_w,_b,_keepratio):
    """Forward pass: two conv/pool/dropout stages then two fully connected
    layers. Returns a dict exposing every intermediate tensor; 'out' holds
    the raw logits (softmax is applied later, inside the loss)."""
    # Reshape the flat 784-vector batch into NHWC images; -1 lets
    # TensorFlow infer the batch dimension.
    images = tf.reshape(_input, shape=[-1, 28, 28, 1])

    # --- Stage 1: conv -> ReLU -> 2x2 max-pool -> dropout ---
    # Dropout is applied after every stage here, not just the FC layers.
    conv1 = tf.nn.conv2d(images, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.relu(tf.nn.bias_add(conv1, _b['bc1']))
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool1_drop = tf.nn.dropout(pool1, _keepratio)

    # --- Stage 2: same pattern, 64 -> 128 channels ---
    conv2 = tf.nn.conv2d(pool1_drop, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(tf.nn.bias_add(conv2, _b['bc2']))
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    pool2_drop = tf.nn.dropout(pool2, _keepratio)

    # Flatten to a vector whose length matches wd1's input (7*7*128).
    flattened = tf.reshape(pool2_drop, [-1, _w['wd1'].get_shape().as_list()[0]])

    # Fully connected layer 1 with ReLU and dropout.
    fc1 = tf.nn.relu(tf.add(tf.matmul(flattened, _w['wd1']), _b['bd1']))
    fc1_drop = tf.nn.dropout(fc1, _keepratio)
    # Output layer: logits only, no activation.
    logits = tf.add(tf.matmul(fc1_drop, _w['wd2']), _b['bd2'])

    # Dict of all intermediates, useful for inspection/debugging.
    return {'input_r': images, 'conv1': conv1, 'pool1': pool1, 'pool1_dr1': pool1_drop,
            'conv2': conv2, 'pool2': pool2, 'pool_dr2': pool2_drop, 'dense1': flattened,
            'fc1': fc1, 'fc_dr1': fc1_drop, 'out': logits}
print("CNN READY")
# Debug aid: create a conv-kernel-shaped variable and print its spec.
# NOTE: the original also opened a tf.Session here (never closed — a
# resource leak), ran a duplicate global_variables_initializer, and built
# a tf.Print node that was never executed; all of that dead debug code is
# removed — the real session/initialiser are created once, further below.
a = tf.Variable(tf.random_normal([3,3,1,64],stddev=0.1))
print(a)
# Graph inputs: flattened images, one-hot labels, and the dropout keep
# probability (fed as 0.7 during training, 1.0 during evaluation).
x = tf.placeholder(tf.float32,[None,n_input])
y = tf.placeholder(tf.float32,[None,n_output])
keepratio = tf.placeholder(tf.float32)
# Build the network once; keep only the output logits tensor.
_pred = conv_basic(x,weights,biases,keepratio)['out']
# Loss: mean softmax cross-entropy between logits and one-hot labels.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = _pred,labels=y))
# Adam optimiser minimising the loss with a fixed learning rate.
optm = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(cost)
# Per-example correctness: True where the predicted class matches the
# true class. argmax(axis=1) picks the highest-scoring class per row.
_corr = tf.equal(tf.argmax(_pred,1),tf.argmax(y,1))
# Accuracy = mean of the boolean vector cast to 1.0/0.0.
accr = tf.reduce_mean(tf.cast(_corr,tf.float32))
# Save a checkpoint every epoch.
save_step = 1
# Keep only the three most recent checkpoints on disk.
saver = tf.train.Saver(max_to_keep=3)
# Mode switch: 1 = train, 0 = restore a checkpoint and test.
do_train=1
# Create the session and initialise all variables.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Hyper-parameters for the run.
training_epochs = 15
batch_size = 16
display_step = 1

# --- Training branch ---
if do_train == 1:
    for epoch in range(training_epochs):
        avg_cost = 0.0
        # Only 10 mini-batches per epoch — a small subsample for speed.
        total_batch = 10
        for _ in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # One optimisation step with dropout enabled (keep 0.7).
            sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio: 0.7})
            # Accumulate the running mean loss, dropout disabled.
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.}) / total_batch
        # Progress report (accuracy is measured on the last mini-batch).
        if (epoch + 1) % display_step == 0:
            print("Epoch:%03d/%03d cost:%.9f"%(epoch,training_epochs,avg_cost))
            train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio: 1.})
            print("Train accuracy:%.3f"%(train_acc))
        # Checkpoint every save_step epochs.
        if epoch % save_step == 0:
            saver.save(sess, "F:/mnist/data/model.ckpt-"+str(epoch))

# --- Test branch (set do_train = 0 to run it) ---
if do_train == 0:
    # Restore the checkpoint written during the final training epoch.
    epoch = training_epochs-1
    saver.restore(sess, "F:/mnist/data/model.ckpt-"+str(epoch))
    # Evaluate accuracy on the whole test set, dropout disabled.
    test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel, keepratio: 1.})
    print("test accr is:%.3f"%(test_acc))
print("Optimization Finished")
訓練的部分過程如下:
測試過程如下:
測試時只需將do_train設爲0(即do_train=0)即可。如果使用Anaconda的spyder的話,記得測試之前先restart kernel一下。