TensorFlow Study Notes: Building a Convolutional Neural Network + MNIST Handwritten Digit Recognition (mini-batch training, reaching 99% training-set and 99% test-set accuracy) + Saving and Loading the Model

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm

import tensorflow as tf

learning_rate = 1e-4
training_iterations = 2500

dropout = 0.5
batch_size = 50

validation_size = 3000
image_to_display = 10

from tensorflow.examples.tutorials.mnist import input_data

# Download/read MNIST (TF 1.x API; this module was removed in TensorFlow 2.x)
mnist_input = input_data.read_data_sets("./MNIST_data", one_hot=True)

training = mnist_input.train.images

trainlabel = mnist_input.train.labels
testing = mnist_input.test.images
testlabel = mnist_input.test.labels
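
A quick look at the array shapes confirms the standard split this reader produces (55,000 training and 10,000 test images, each flattened to a 784-vector with a one-hot label):

print(training.shape, trainlabel.shape)  # (55000, 784) (55000, 10)
print(testing.shape, testlabel.shape)    # (10000, 784) (10000, 10)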



image_width = 28
image_height = 28
image_size = training.shape[1]


def display(img):
    one_image = img.reshape(image_width, image_height)
    plt.axis('off')
    plt.imshow(one_image, cmap=cm.binary)


display(training[1])  # this sample happens to be a "3"
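
Because the labels are one-hot vectors, np.argmax recovers the digit class; a quick check for the image displayed above:

print(np.argmax(trainlabel[1]))  # 3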

validation_images = training[:validation_size]
validation_labels = trainlabel[:validation_size]
# print(validation_images.shape)
# print(validation_labels.shape)
train_images = training[validation_size:]
train_labels = trainlabel[validation_size:]



# Weight initialization: small truncated-normal noise breaks symmetry
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    # a small positive bias avoids "dead" ReLU units at the start
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


# Convolution: 2-D convolution with stride 1 and SAME (zero) padding
def conv2d(x1, w):
    # strides has four entries: [batch, height, width, channels].
    # Only the middle two (height and width strides) are normally changed.
    return tf.nn.conv2d(x1, w, strides=[1, 1, 1, 1], padding='SAME')


# Max pooling over 2x2 windows, e.g.
# [[0, 3],
#  [4, 2]] => 4
# ksize = [1, a, b, 1] selects an a x b pooling window
def max_pool_2x2(x2):
    return tf.nn.max_pool(x2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
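

To make the pooling comment above concrete, here is a tiny illustrative check (a sketch run in a throwaway session, not part of the training script): the 2x2 patch from the comment reduces to its maximum, 4.

patch = np.array([0., 3., 4., 2.], dtype=np.float32).reshape(1, 2, 2, 1)
with tf.Session() as tmp_sess:
    pooled = tmp_sess.run(max_pool_2x2(tf.constant(patch)))
print(pooled[0, 0, 0, 0])  # 4.0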



labels_count = trainlabel.shape[1]
# images
x = tf.placeholder('float', shape=[None, image_size])
# print(x.get_shape())
# labels
y_ = tf.placeholder('float', shape=[None, labels_count])
# print(y_.get_shape())

# First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
# print(W_conv1.get_shape())
# the first two numbers are the filter size (height x width),
# the third is the number of input channels, the fourth (32) the number of filters
b_conv1 = bias_variable([32])
# print(b_conv1.get_shape())
# one bias per output feature map (32 of them)

# Reshape the flat 784-vectors back into 28x28x1 images
image = tf.reshape(x, [-1, image_width, image_height, 1])
# print(image.get_shape())
# first convolution + ReLU activation
h_conv1 = tf.nn.relu(conv2d(image, W_conv1) + b_conv1)
# print(h_conv1.get_shape())
# first pooling
h_pool1 = max_pool_2x2(h_conv1)
# print(h_pool1.get_shape())

# Second convolutional layer: 32 input channels, 64 output channels,
# again with 5x5 filters
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# print(h_conv2.get_shape())
h_pool2 = max_pool_2x2(h_conv2)
# print(h_pool2.get_shape())
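
Why 7 * 7 * 64 in the next layer? SAME-padded convolutions preserve the 28x28 spatial size, while each 2x2 max-pool halves it: 28 -> 14 after the first pool, 14 -> 7 after the second, with 64 feature maps coming out of the second convolution. Uncommenting the print above confirms this:

print(h_pool2.get_shape())  # (?, 7, 7, 64)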

# Fully connected layer
# flatten the pooled feature maps into one long vector per image
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

# -1 is a wildcard dimension that is inferred automatically (the batch size)
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print(h_fc1.get_shape())
# print(h_fc1.get_shape())

# dropout: keep_prob is fed `dropout` (0.5) during training and 1.0 at evaluation
keep_prob = tf.placeholder('float')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# print(h_fc1_drop.get_shape())

W_fc2 = weight_variable([1024, labels_count])
b_fc2 = bias_variable([labels_count])

y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# print(y.get_shape())

# cross-entropy loss (note: tf.log(y) yields NaN if y ever reaches exactly 0)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
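
The hand-written cross-entropy above is the classic tutorial formulation, but it is numerically fragile. A sketch of a drop-in alternative (not the author's original code): keep the raw logits and let TensorFlow fuse softmax and cross-entropy, which avoids log(0):

# hypothetical replacement for the explicit softmax + log above
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))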

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# print(accuracy)

predict = tf.argmax(y, 1)
# print(predict)
# print(tf.argmax(y_, 1))

epochs_completed = 0
index_in_epoch = 0
num_examples = train_images.shape[0]


# Mini-batch iterator: serves the next batch, reshuffling at every epoch boundary
def next_batch(batch_size):
    global train_images
    global train_labels
    global index_in_epoch
    global epochs_completed

    start = index_in_epoch
    index_in_epoch += batch_size

    # print("start => ", start)
    if index_in_epoch > num_examples:
        epochs_completed += 1
        perm = np.arange(num_examples)
        np.random.shuffle(perm)
        train_images = train_images[perm]
        train_labels = train_labels[perm]
        # next epoch
        start = 0
        index_in_epoch = batch_size
        assert batch_size <= num_examples
    end = index_in_epoch
    # print("start => ", start,"    | end => ",end)
    return train_images[start:end], train_labels[start:end]
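
A quick sanity check of the batching helper (illustrative only; each call consumes a batch, so the epoch index is reset afterwards here):

xs, ys = next_batch(batch_size)
print(xs.shape, ys.shape)  # (50, 784) (50, 10)
index_in_epoch = 0  # reset so training below starts from a clean epoch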

# Initialize variables and start an interactive session
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)

train_accuracies = []
validation_accuracies = []
x_range = []
display_step = 1
batch_xs, batch_ys = 0, 0
# Load a previously saved model, if a checkpoint exists, and continue training from it
print("--------------------------- loading model ------------------------")
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint('./saver/')
if ckpt is not None:
    saver.restore(sess, ckpt)

for i in range(training_iterations):
    batch_xs, batch_ys = next_batch(batch_size)
    # print("batch_xs=", batch_xs.shape, "batch_ys=", batch_ys.shape)
    # periodically log training / validation accuracy
    if i % display_step == 0 or (i + 1) == training_iterations:
        train_accuracy = accuracy.eval(feed_dict={x: batch_xs,
                                                  y_: batch_ys,
                                                  keep_prob: 1.0})
        if validation_size:
            # evaluated on the first batch_size validation samples to keep it cheap
            validation_accuracy = accuracy.eval(feed_dict={x: validation_images[0:batch_size],
                                                           y_: validation_labels[0:batch_size],
                                                           keep_prob: 1.0})
            print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d' % (
                train_accuracy, validation_accuracy, i))
            validation_accuracies.append(validation_accuracy)
        else:
            print('training_accuracy => %.4f for step %d' % (train_accuracy, i))
        train_accuracies.append(train_accuracy)
        x_range.append(i)
        # save a checkpoint every 1000 steps
        if i % 1000 == 0:
            saver.save(sess, './saver/RNN_01_model')

        # log less often as training goes on: steps 1-10, then every 10, 100, ...
        if i % (display_step * 10) == 0 and i:
            display_step *= 10
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: dropout})
Sample training output:

training_accuracy / validation_accuracy => 0.24 / 0.16 for step 1
training_accuracy / validation_accuracy => 0.22 / 0.30 for step 2
training_accuracy / validation_accuracy => 0.36 / 0.28 for step 3
training_accuracy / validation_accuracy => 0.24 / 0.28 for step 4
training_accuracy / validation_accuracy => 0.30 / 0.34 for step 5
training_accuracy / validation_accuracy => 0.16 / 0.36 for step 6
training_accuracy / validation_accuracy => 0.22 / 0.32 for step 7
training_accuracy / validation_accuracy => 0.22 / 0.42 for step 8
training_accuracy / validation_accuracy => 0.26 / 0.30 for step 9
training_accuracy / validation_accuracy => 0.42 / 0.36 for step 10
training_accuracy / validation_accuracy => 0.54 / 0.54 for step 20
training_accuracy / validation_accuracy => 0.62 / 0.58 for step 30
training_accuracy / validation_accuracy => 0.64 / 0.62 for step 40
training_accuracy / validation_accuracy => 0.80 / 0.66 for step 50
training_accuracy / validation_accuracy => 0.82 / 0.72 for step 60
training_accuracy / validation_accuracy => 0.74 / 0.84 for step 70
training_accuracy / validation_accuracy => 0.74 / 0.76 for step 80
training_accuracy / validation_accuracy => 0.76 / 0.76 for step 90
training_accuracy / validation_accuracy => 0.68 / 0.80 for step 100
training_accuracy / validation_accuracy => 0.82 / 0.94 for step 200
training_accuracy / validation_accuracy => 0.94 / 0.94 for step 300
training_accuracy / validation_accuracy => 0.96 / 0.96 for step 400
training_accuracy / validation_accuracy => 0.92 / 0.96 for step 500
training_accuracy / validation_accuracy => 0.96 / 0.98 for step 600
training_accuracy / validation_accuracy => 0.90 / 0.98 for step 700
training_accuracy / validation_accuracy => 0.94 / 0.96 for step 800
training_accuracy / validation_accuracy => 0.94 / 0.96 for step 900
training_accuracy / validation_accuracy => 1.00 / 0.98 for step 1000
training_accuracy / validation_accuracy => 0.98 / 0.96 for step 2000
training_accuracy / validation_accuracy => 0.98 / 1.00 for step 2499
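
The title's test-set figure comes from evaluating the trained network on the held-out test images. A minimal sketch (evaluating in batch_size chunks so each feed stays small, with keep_prob=1.0 since dropout is disabled at evaluation time):

test_accuracies = []
for start in range(0, testing.shape[0], batch_size):
    test_accuracies.append(accuracy.eval(feed_dict={
        x: testing[start:start + batch_size],
        y_: testlabel[start:start + batch_size],
        keep_prob: 1.0}))
print('test_accuracy => %.4f' % np.mean(test_accuracies))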