# TensorFlow tutorial 3: MNIST hands-on

import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./input_data', one_hot=True) #自動下載


def base_layer(input, in_size, out_size, activation_fun=None):
    """Fully-connected layer: input @ W + b, optionally through an activation.

    Args:
        input: tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_fun: optional activation (e.g. tf.nn.softmax); identity if None.

    Returns:
        Tensor of shape [batch, out_size].
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))   # [in_size, out_size] weight matrix
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)            # [1, out_size] bias row, initialised to 0.1
    linear = tf.matmul(input, weights) + biases                    # broadcasts bias over the batch dimension
    return linear if activation_fun is None else activation_fun(linear)

def compute_accuracy(test_images, test_labels):
    """Evaluate the current model on a labelled test set.

    Runs the module-level ``result`` node through the module-level ``sess``
    and ``xs`` placeholder, then compares predicted vs. true classes.

    Fix: the original built fresh TF ops (tf.argmax / tf.equal /
    tf.reduce_mean) on every call, leaking nodes into the TF1 default graph
    each evaluation, and ran the session a second time just to reduce them.
    The comparison is now done in NumPy — numerically equivalent, one
    session run, and the graph is left untouched.

    Args:
        test_images: array of shape [n, 784].
        test_labels: one-hot array of shape [n, 10].

    Returns:
        Fraction of correctly classified samples as a Python float.
    """
    global result
    # Predicted class probabilities, shape [n, 10].
    y_pre = sess.run(result, feed_dict={xs: test_images})
    correct = np.argmax(y_pre, axis=1) == np.argmax(test_labels, axis=1)
    return float(np.mean(correct))


if __name__ == "__main__":

    # Placeholders: flattened 28x28 images and one-hot labels (10 classes).
    xs = tf.placeholder(tf.float32, [None, 784])  # 28x28
    ys = tf.placeholder(tf.float32, [None, 10])   # ten outputs per sample

    # Single softmax layer: 784 inputs -> 10 class probabilities.
    result = base_layer(xs, 784, 10, activation_fun=tf.nn.softmax)

    # Cross-entropy loss.  Fix: clip the probabilities before tf.log —
    # once the softmax saturates, log(0) = -inf would otherwise turn the
    # loss (and gradients) into NaN and stall training.
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(ys * tf.log(tf.clip_by_value(result, 1e-10, 1.0)),
                       reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        # Mini-batch SGD: 100 samples per step from the downloaded dataset.
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
        if i % 50 == 0:
            print(compute_accuracy(mnist.test.images, mnist.test.labels))

 

# --- blog-scrape residue below (kept as comments so the file stays runnable) ---
# Post a comment
# All comments
# No one has commented yet — be the first: type in the comment box above and click Publish.
# Related articles