Implementing a CNN with TensorFlow. I won't go over the theory behind CNNs again here; let's jump straight into the code.
# Use a CNN to recognize handwritten MNIST digits
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# MNIST: the handwritten digits 0 through 9
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
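# With one_hot = True, each label is a 10-dimensional indicator vector:
# for example, the digit 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]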
# Compute classification accuracy on a held-out set
def compute_accuracy(v_xs, v_ys):
    # prediction is defined later at module level
    global prediction
    # keep_prob = 1 disables dropout at evaluation time
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result
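# How the accuracy is computed: tf.argmax(..., 1) returns the index of the
# largest entry in each row (the predicted or true class), tf.equal compares
# the two index vectors element-wise, and casting the booleans to float32
# before tf.reduce_mean gives the fraction of correct predictions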
# Helper to create a weight variable
def weight_variable(shape):
    # Initialize with small random values drawn from a truncated normal distribution
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# Helper to create a bias variable
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
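# Why these initializers: truncated-normal weights break the symmetry between
# units, and the small positive bias (0.1) is a common heuristic for keeping
# ReLU units from starting out "dead" (always outputting zero)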
def conv2d(x, W):
    # The first argument is the input feature map (the image for the first
    # layer), the second holds the convolution kernels, and strides gives the
    # step size per dimension: here 1 in both the height and width directions
    # (the batch and channel strides must always be 1)
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
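# With padding = 'SAME', the output spatial size is ceil(input_size / stride),
# so at stride 1 a 28x28 input produces a 28x28 output (zero padding fills in
# the border)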
def max_pool_2x2(x):
    # Pooling further reduces the size of the feature maps (and with it the
    # number of parameters in the layers that follow)
    # ksize is the pooling window size, strides the step size, and padding
    # whether to use zero padding
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
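# A 2x2 window with stride 2 halves each spatial dimension:
# 28x28 -> 14x14 after the first pooling layer, 14x14 -> 7x7 after the second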
# Define the placeholders
xs = tf.placeholder(tf.float32, [None, 784])  # each image is 28x28, flattened
ys = tf.placeholder(tf.float32, [None, 10])
# keep_prob is the probability that a unit is kept when dropout is applied
keep_prob = tf.placeholder(tf.float32)
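# tf.nn.dropout keeps each unit with probability keep_prob and scales the kept
# activations by 1 / keep_prob, so the expected activation is unchanged and no
# extra rescaling is needed at test time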
x_image = tf.reshape(xs, [-1, 28, 28, 1])  # -1 lets TensorFlow infer the batch size
# print(x_image.shape)
# First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])  # 5x5 kernels, 1 input channel, 32 output channels
b_conv1 = bias_variable([32])
# Convolve, then apply a ReLU non-linearity
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28x28x32
h_pool1 = max_pool_2x2(h_conv1)  # output size 14x14x32
# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)  # output size 7x7x64
# First fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])  # 1024 hidden units
b_fc1 = bias_variable([1024])
# Flatten the output of the second pooling layer into one vector per example
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Apply dropout
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Second fully connected (output) layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# Softmax turns the output layer's logits into class probabilities; note that
# the input here must be h_fc1_drop, otherwise the dropout layer is bypassed
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Cross-entropy loss between the true labels and the predicted probabilities
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))
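# Per example this is H(y, p) = -sum_i y_i * log(p_i); with one-hot labels it
# reduces to -log of the probability assigned to the true class. Note that
# tf.nn.softmax_cross_entropy_with_logits computes the same loss directly from
# the pre-softmax logits and is more numerically stable than taking tf.log of
# a softmax output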
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Feed keep_prob = 0.5 so dropout is actually active during training
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
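Feeding all 10,000 test images in a single feed_dict, as compute_accuracy does above, can run out of memory on smaller GPUs. Below is a minimal sketch of a batched variant, assuming the same xs/keep_prob placeholders, sess, and prediction as above; the name compute_accuracy_batched and the batch size of 1000 are my own choices, not part of the original code:

import numpy as np

def compute_accuracy_batched(v_xs, v_ys, batch_size=1000):
    # Evaluate the network chunk by chunk, accumulate the number of correct
    # predictions, then divide by the total number of examples
    global prediction
    correct_count = 0.0
    for start in range(0, len(v_xs), batch_size):
        end = start + batch_size
        y_pre = sess.run(prediction, feed_dict={xs: v_xs[start:end], keep_prob: 1})
        correct_count += np.sum(np.argmax(y_pre, 1) == np.argmax(v_ys[start:end], 1))
    return correct_count / len(v_xs)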