Code: https://github.com/BeCuriousCat/LearningML/blob/master/MLP_tensorflow.ipynb
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
A few samples from the MNIST dataset are shown in the figure.
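The figure is easy to reproduce by inspecting the loaded arrays. As a quick check (a minimal sketch using only the mnist object created above):

print(mnist.train.num_examples)  # 55000 training images
print(mnist.test.num_examples)   # 10000 test images
print(mnist.train.images.shape)  # (55000, 784): each 28x28 image is flattened
print(mnist.train.labels.shape)  # (55000, 10): one-hot labels, since one_hot=True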
# Hyperparameters: learning rate, number of epochs, batch size
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network parameters
n_hidden_1 = 256  # number of units in the 1st hidden layer
n_hidden_2 = 256  # number of units in the 2nd hidden layer
n_input = 784     # MNIST input size (28x28 images, flattened)
n_classes = 10    # number of MNIST classes (digits 0-9)
# tf graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
Set up the global hyperparameters and the input placeholders.
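The string "float" is shorthand for tf.float32, and the leading None dimension lets the same graph accept any batch size. For illustration (a small sketch, assuming the test set loaded earlier), both of these feeds are valid for the same placeholder:

single_feed = {x: mnist.test.images[:1]}   # one image, shape (1, 784)
batch_feed = {x: mnist.test.images[:100]}  # a batch of 100 images, shape (100, 784)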
# Build the multilayer perceptron model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation (raw logits)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Weight and bias variables, initialized from a normal distribution
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
Define the model and initialize its parameters.
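For a sense of the model size, the parameter count follows directly from the layer sizes above (a back-of-the-envelope sketch, not part of the original notebook):

# h1: 784x256 weights + 256 biases; h2: 256x256 + 256; out: 256x10 + 10
n_params = (n_input * n_hidden_1 + n_hidden_1
            + n_hidden_1 * n_hidden_2 + n_hidden_2
            + n_hidden_2 * n_classes + n_classes)
print(n_params)  # 269322 trainable parameters

Most of the capacity sits in the first weight matrix. Note also that tf.random_normal defaults to stddev=1.0, a fairly aggressive initialization, which typically makes the loss values in the first epochs large.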
# Construct the model
pred = multilayer_perceptron(x, weights, biases)
# Define the loss and the optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
Build the model, then define the loss function (softmax cross-entropy) and the Adam optimizer.
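tf.nn.softmax_cross_entropy_with_logits fuses the softmax with the cross-entropy for numerical stability. Conceptually it computes the following (an illustrative sketch only; the naive form below can overflow for large logits, so prefer the fused op):

probs = tf.nn.softmax(pred)                            # logits -> class probabilities
manual_ce = -tf.reduce_sum(y * tf.log(probs), axis=1)  # cross-entropy per example
manual_cost = tf.reduce_mean(manual_ce)                # averaged over the batch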
# Initialize the variables
init = tf.global_variables_initializer()
# Use a plain Session (not a with-block) so the prediction cells below can reuse it
sess = tf.Session()
sess.run(init)
# Training loop over epochs
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run the optimization op (backprop) and the cost op (to get the loss value)
        _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
        # Accumulate the average loss over the epoch
        avg_cost += c / total_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test the model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
# Pick 10 random test images (random.sample avoids the off-by-one of starting at index 1)
import random
import matplotlib.pyplot as plt
index = random.sample(range(len(mnist.test.images)), 10)
inp = [mnist.test.images[i] for i in index]
# Display the images in a 2x5 grid
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(10):
    img = inp[i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
# Predict labels for the sampled images (the session from training is still open)
model_pred = tf.argmax(pred, 1)
classification = sess.run(model_pred, feed_dict={x: inp})
print(classification)
With the parameters collected, we train the network, evaluate it on the test set, and finally sample a few test images to inspect the predictions. After 15 epochs of training, the test accuracy already reaches 94.6%. The sampled images and their predicted labels are printed above.
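To check the sampled predictions against the ground truth (a small follow-up sketch, assuming the index list and the open session from above):

true_labels = [mnist.test.labels[i].argmax() for i in index]
print(true_labels)  # compare with the classification array printed above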