tensorflow learning -- CNN training on MNIST after softmax regression (Part 3)

Since a single day covered quite a lot of material, I will spend today and the next day or two digging deeper into the first three posts, for example the formula derivations and the dimensions of the input arrays.

Reference: http://wiki.jikexueyuan.com/project/tensorflow-zh/tutorials/mnist_beginners.html

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets("/home/cage/TensorFlow/mnist", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
# x: the number of images is left unspecified (None); each image is a vector of 784 pixels
w = tf.Variable(tf.zeros([784,10]))
# each image is 28x28 = 784 pixels; there are 10 digit classes

b = tf.Variable(tf.zeros([10]))
y_ = tf.placeholder("float", [None,10])
y = tf.nn.softmax(tf.matmul(x,w) + b)
#  y = softmax(xW + b), where b is 10-dimensional
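For intuition, softmax exponentiates each score and normalizes so the ten outputs sum to 1 and can be read as probabilities. A tiny NumPy sketch with made-up logits (not values from the model above):

import numpy as np
logits = np.array([2.0, 1.0, 0.1])            # made-up scores for 3 classes
print(np.exp(logits) / np.exp(logits).sum())  # ≈ [0.66 0.24 0.10], sums to 1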

cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# compute the cross-entropy: accumulate y_*log(y) over all classes and negate
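As a sanity check, the same cross-entropy formula on a single made-up example (the label and prediction below are illustrative, not model output):

import numpy as np
y_label = np.array([0., 0., 1., 0.])      # one-hot label: class 2
y_prob  = np.array([0.1, 0.2, 0.6, 0.1])  # a softmax output that sums to 1
print(-np.sum(y_label * np.log(y_prob)))  # -log(0.6) ≈ 0.51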

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)  # gradient descent, learning rate 0.01
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)  # a mini-batch of 100 images
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# tf.cast converts the booleans to floats; tf.reduce_mean then takes their average
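For example, with four made-up prediction results of which three are correct (reusing the session created above):

demo = tf.reduce_mean(tf.cast(tf.constant([True, False, True, True]), "float"))
print(sess.run(demo))  # (1+0+1+1)/4 = 0.75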

print (sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

#The code above is the softmax model from part (2).
#Below is the CNN part, which improves the accuracy.
sess = tf.InteractiveSession()  # a fresh session for the CNN; InteractiveSession installs itself as the default, enabling .eval() and .run() below
def weight_variable(shape):
    initial = tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)
#tf.truncated_normal draws values from a normal distribution with mean=0 and stddev=0.1; samples falling more than two standard deviations from the mean are discarded and re-drawn
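An illustrative check of that truncation (the sample count here is arbitrary):

samples = sess.run(tf.truncated_normal([10000], stddev=0.1))
print(samples.min(), samples.max())  # both fall within ±0.2, i.e. two stddevs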


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# a slightly positive initial bias (0.1) helps avoid "dead" ReLU neurons
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
# convolution with stride 1 in every dimension; 'SAME' padding keeps the output the same height/width as the input


def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],
                            strides=[1,2,2,1], padding='SAME')
# 2x2 max pooling with stride 2, halving the height and width
w_conv1 = weight_variable([5,5,1,32])  # first conv layer: 5x5 patches, 1 input channel, 32 feature maps
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])  # back to 28x28 images with 1 channel; -1 infers the batch size
h_conv1 = tf.nn.relu(conv2d(x_image,w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

w_conv2 = weight_variable([5,5,32,64])  # second conv layer: 5x5 patches, 32 input channels, 64 feature maps
b_conv2 = bias_variable([64])


h_conv2 = tf.nn.relu(conv2d(h_pool1,w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
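A quick shape trace explains the 7*7*64 used next: the SAME convolutions with stride 1 preserve the 28x28 spatial size, and each 2x2 max pool halves it (28 -> 14 -> 7). The graph itself confirms this:

print(h_pool1.get_shape())  # (?, 14, 14, 32)
print(h_pool2.get_shape())  # (?, 7, 7, 64)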

w_fc1 = weight_variable([7 * 7 * 64, 1024])  # fully connected layer: 7x7x64 pooled features -> 1024 neurons
b_fc1 = bias_variable([1024])


h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])  # flatten for the dense layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
keep_prob = tf.placeholder("float")  # dropout keep probability, fed in at run time
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
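tf.nn.dropout keeps each activation with probability keep_prob and scales the survivors by 1/keep_prob, so the expected value is unchanged. A toy illustration (made-up vector; which entries get zeroed is random):

print(sess.run(tf.nn.dropout(tf.constant([1., 2., 3., 4.]), keep_prob=0.5)))
# one possible output: [2. 0. 6. 8.] (kept entries doubled, dropped entries zeroed)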
w_fc2 = weight_variable([1024,10])  # readout layer: 1024 features -> 10 class scores
b_fc2 = bias_variable([10])


y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop,w_fc2) + b_fc2)  # the CNN's softmax output
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)  # Adam optimizer, learning rate 1e-4
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        # evaluate with keep_prob=1.0: dropout is disabled when measuring accuracy
        train_accuracy = accuracy.eval(feed_dict={
                x:batch[0],y_:batch[1],keep_prob:1.0})
        print ("step %d, training accuracy %g"%(i, train_accuracy))
    # run one training step on every iteration (this line must sit outside the
    # if-block; indented one level further, the model would train only once every 100 steps)
    train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})
    
print ("test accuracy %g"%accuracy.eval(feed_dict={
            x:mnist.test.images, y_:mnist.test.labels, keep_prob:1.0
        }))

I will fill in the comments for the code above over the next couple of days as I organize my notes.

As for the final run: my poor little Mac could not get through all 20,000 iterations, so I only got the partial result shown in the figure.

