TensorFlow + CNN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data (a raw string keeps the backslashes in the Windows path from being read as escapes)
mnist = input_data.read_data_sets(r'F:\Pycharm projection\MNIST_data', one_hot=True)

# Size of each batch
batch_size = 100
# Compute how many batches there are in total
n_batch = mnist.train.num_examples // batch_size
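# Quick sanity check of the batch arithmetic (illustrative, can be deleted; this loader
# uses the standard 55,000/5,000/10,000 train/validation/test split):
print(mnist.train.num_examples)  # 55000 with the standard split
print(n_batch)                   # 55000 // 100 == 550 batches per epoch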

# Initialize weights
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)  # sample from a truncated normal distribution
    return tf.Variable(initial)
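# tf.truncated_normal resamples any draw that falls more than two standard deviations
# from the mean, so every initial weight here lies in [-0.2, 0.2]. An illustrative
# check (not part of the model, can be deleted):
with tf.Session() as check_sess:
    sample = check_sess.run(tf.truncated_normal([10000], stddev=0.1))
print(abs(sample).max() <= 0.2)  # True: tails beyond 2*stddev are cut off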

# Initialize biases
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolutional layer
"""
x: input tensor of shape [batch, in_height, in_width, in_channels]
W: filter/kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
strides[0] = strides[3] = 1; strides[1] is the stride in the x direction, strides[2] in the y direction
padding: a string, either "SAME" or "VALID"
"""
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
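# With 'SAME' padding and stride 1 the output keeps the input's spatial size
# (out = ceil(in / stride)); 'VALID' would shrink it to in - filter + 1.
# Illustrative shape check (not part of the model):
probe = tf.zeros([1, 28, 28, 1])
kernel = tf.zeros([5, 5, 1, 32])
print(conv2d(probe, kernel).shape)  # (1, 28, 28, 32)
print(tf.nn.conv2d(probe, kernel, strides=[1, 1, 1, 1], padding='VALID').shape)  # (1, 24, 24, 32)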

# Pooling layer
# ksize = [1, x, y, 1]: pooling window size in the x and y directions
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
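# A 2*2 window with stride 2 halves each spatial dimension ('SAME' rounds odd sizes up).
# Illustrative shape check (not part of the model):
print(max_pool_2x2(tf.zeros([1, 28, 28, 32])).shape)  # (1, 14, 14, 32)
print(max_pool_2x2(tf.zeros([1, 7, 7, 32])).shape)    # (1, 4, 4, 32): ceil(7 / 2) = 4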

# 定義2個placeholder
x = tf.placeholder(tf.float32,[None,784])   #       28*28     行784列   輸入是一維的,但圖片是平面2維的,所以需要格式的轉換
y = tf.placeholder(tf.float32,[None,10])

# 改變x格式轉爲4D的向量[batch,in_height,on_width,in_channels]
x_image = tf.reshape(x,[-1,28,28,1])     # 784復原爲28*28,1代表圖片是1維的黑白,如果是3代表是彩色圖片,通道數1
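# The -1 lets TensorFlow infer that dimension from the other sizes, so the same graph
# works for any batch size. Illustrative check (not part of the model):
print(tf.reshape(tf.zeros([100, 784]), [-1, 28, 28, 1]).shape)  # (100, 28, 28, 1)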

# Initialize the weights and biases of the first convolutional layer
w_conv1 = weight_variable([5, 5, 1, 32])  # 5*5 sampling window; 32 kernels extracting features from 1 input plane
b_conv1 = bias_variable([32])  # one bias per kernel

# Convolve x_image with the weight tensor, add the bias, then apply the ReLU activation
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # apply max pooling

# Initialize the weights and biases of the second convolutional layer
w_conv2 = weight_variable([5, 5, 32, 64])  # 5*5 sampling window; 64 kernels extracting features from 32 input planes
b_conv2 = bias_variable([64])  # one bias per kernel

# Convolve h_pool1 with the weight tensor, add the bias, then apply the ReLU activation
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

"""
28*28的圖片第一次卷積後還是28*28,第一次池化後變爲14*14
第二次卷積後衛14*14,第二次池化後變爲7*7
通過上面操作後得到64張7*7的平面
"""

# Initialize the weights of the first fully connected layer
w_fc1 = weight_variable([7*7*64, 1024])  # the previous layer has 7*7*64 neurons; this fully connected layer has 1024
b_fc1 = bias_variable([1024])  # 1024 nodes

# Flatten the output of the second pooling layer to one dimension
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])  # -1 stands for any value; here it is the batch size, 100
# Compute the output of the first fully connected layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
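# This layer dominates the model's parameter count: 7*7*64 inputs times 1024 outputs
# is 3,211,264 weights, versus 5*5*1*32 = 800 in conv1 and 5*5*32*64 = 51,200 in conv2.
print(7*7*64 * 1024)  # 3211264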

# keep_prob is the probability that each neuron's output is kept during dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)
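# tf.nn.dropout zeroes each element with probability 1 - keep_prob and scales the
# survivors by 1/keep_prob, so the expected activation is unchanged and no rescaling
# is needed at test time (where keep_prob is fed as 1.0). Illustrative run:
with tf.Session() as drop_sess:
    print(drop_sess.run(tf.nn.dropout(tf.ones([10]), 0.5)))  # about half zeros, the rest 2.0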


# Initialize the second fully connected layer
w_fc2 = weight_variable([1024, 10])  # 10 stands for the 10 classes
b_fc2 = bias_variable([10])
# Compute the output: keep the raw logits for the loss, and apply softmax for the probability output
logits = tf.matmul(h_fc1_dropout, w_fc2) + b_fc2
prediction = tf.nn.softmax(logits)  # convert the scores into probabilities

# Cross-entropy cost function (it must be fed the raw logits, not the softmax output)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
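# A common pitfall is feeding prediction (already softmaxed) into
# softmax_cross_entropy_with_logits: softmax is then applied twice, which flattens
# the gradients and puts a floor under the loss. An illustrative comparison:
z = tf.constant([[10.0, 0.0, 0.0]])
t = tf.constant([[1.0, 0.0, 0.0]])
with tf.Session() as ce_sess:
    print(ce_sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=t, logits=z)))                 # ~[0.0001]
    print(ce_sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=t, logits=tf.nn.softmax(z))))  # ~[0.55]: double softmax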
# Optimize with the AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Store the per-example results in a list of booleans
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))  # argmax returns the index of the largest value along the given axis
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast converts the booleans to 32-bit floats
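# Concretely, for a batch of two examples (made-up values for illustration):
p_demo = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # predicts class 1, then class 0
t_demo = tf.constant([[0.0, 1.0], [0.0, 1.0]])  # true class is 1 both times
match = tf.equal(tf.argmax(p_demo, 1), tf.argmax(t_demo, 1))
with tf.Session() as acc_sess:
    print(acc_sess.run(match))                                       # [ True False]
    print(acc_sess.run(tf.reduce_mean(tf.cast(match, tf.float32))))  # 0.5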

# Initialize all global variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})

        # Evaluate on the test set with dropout disabled (keep_prob = 1.0)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print('Iter' + str(epoch), 'testing accuracy' + str(acc))
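Note that this is TensorFlow 1.x graph-style code, and the tensorflow.examples.tutorials module was removed in TensorFlow 2.x. If only TF 2 is installed, one possible workaround (a sketch, not verified against every version) is to run the graph through the compat layer and load MNIST via Keras instead:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# MNIST can then be loaded with tf.keras.datasets.mnist.load_data(),
# after flattening the images to 784 values and one-hot encoding the labels.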

Output:

Iter0 testing accuracy0.9504
Iter1 testing accuracy0.9706
Iter2 testing accuracy0.9768
Iter3 testing accuracy0.9772
Iter4 testing accuracy0.984
Iter5 testing accuracy0.9846
Iter6 testing accuracy0.9857
Iter7 testing accuracy0.9883
Iter8 testing accuracy0.9871
Iter9 testing accuracy0.989
Iter10 testing accuracy0.9872
Iter11 testing accuracy0.9904
Iter12 testing accuracy0.9883
Iter13 testing accuracy0.989
Iter14 testing accuracy0.9878
Iter15 testing accuracy0.9907
Iter16 testing accuracy0.9911
Iter17 testing accuracy0.9899
Iter18 testing accuracy0.9905
Iter19 testing accuracy0.99
Iter20 testing accuracy0.9911
