This post translates the code from the previous article into TensorFlow.
Link: https://blog.csdn.net/seTaire/article/details/93760032
Training results: (figure omitted)
import numpy as np
import tensorflow as tf
def randomdata(classes, numberperclass, dimension):
    """Generate a spiral toy dataset: `classes` interleaved arms with `numberperclass` points each."""
    x = np.zeros((classes * numberperclass, dimension))
    y = np.zeros(classes * numberperclass, dtype='uint8')
    for j in range(classes):
        ix = list(range(numberperclass * j, numberperclass * (j + 1)))
        r = np.linspace(0.0, 1, numberperclass)  # radius
        t = np.linspace(j * 4, (j + 1) * 4, numberperclass) + np.random.randn(numberperclass) * 0.2  # angle, with noise
        x[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        y[ix] = j
    # Return the points and the one-hot encoded labels
    return x, np.eye(classes)[y]
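# Sanity check of the generator (shapes only): randomdata(3, 100, 2) returns
# x with shape (300, 2) and one-hot y with shape (300, 3).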
if __name__ == '__main__':
    # Hyperparameters
    classes = 3
    numberperclass = 100
    dimension = 2
    hidden_number = 100
    step_size = 1
    reg = 0.001
    totalnumber = classes * numberperclass
    train_x, train_y = randomdata(classes, numberperclass, dimension)
    # Placeholders for the full batch of inputs and one-hot labels
    x = tf.placeholder("float", [totalnumber, dimension])
    y = tf.placeholder("float", [totalnumber, classes])
    # Two-layer network: ReLU hidden layer followed by a softmax output
    W = tf.Variable(tf.random_normal([dimension, hidden_number]), trainable=True)
    b = tf.Variable(tf.random_normal([1, hidden_number]), trainable=True)
    hidden_layer = tf.nn.relu(tf.matmul(x, W) + b)
    W2 = tf.Variable(tf.random_normal([hidden_number, classes]), trainable=True)
    b2 = tf.Variable(tf.random_normal([1, classes]), trainable=True)
    probs = tf.nn.softmax(tf.matmul(hidden_layer, W2) + b2)
    # Data loss. Note: despite the variable name, this is the mean squared error
    # between the softmax output and the one-hot labels, not a true cross-entropy.
    cross_entropy = tf.reduce_mean(tf.square(probs - y))
    # Collect the data loss and the L2 penalties on all parameters, then sum them
    tf.add_to_collection('loss', cross_entropy)
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(W2))
    tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(reg)(b2))
    loss = tf.add_n(tf.get_collection("loss"))
    # Plain gradient descent on the regularized loss
    train_step = tf.train.GradientDescentOptimizer(step_size).minimize(loss)
    # Fraction of samples whose predicted class matches the label
    correct_prediction = tf.equal(tf.argmax(probs, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
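    # A true softmax cross-entropy (matching the variable name above) could be
    # swapped in instead; a sketch using the standard TF1 op on the pre-softmax
    # logits:
    #   logits = tf.matmul(hidden_layer, W2) + b2
    #   cross_entropy = tf.reduce_mean(
    #       tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))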
    # Group the loss summary under the loss-model scope
    with tf.name_scope("loss-model"):
        # Add a scalar summary for the loss output so its convergence curve
        # can be watched in TensorBoard
        tf.summary.scalar("loss", loss)
    with tf.name_scope("accuracy-model"):
        tf.summary.scalar("accuracy", accuracy)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # merge_all() gathers every summary op defined above
        merged = tf.summary.merge_all()
        # Save everything the run produces to /tmp/tensorflow for TensorBoard
        writer = tf.summary.FileWriter('/tmp/tensorflow', sess.graph)
        for i in range(1000):
            _, summary = sess.run([train_step, merged], feed_dict={x: train_x, y: train_y})
            writer.add_summary(summary, i)
            # if i % 100 == 0:
            #     print("test accuracy %g" % accuracy.eval(feed_dict={x: train_x, y: train_y}))
            #     print(loss.eval(feed_dict={x: train_x, y: train_y}))
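The listing above uses the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.contrib). Under TensorFlow 2.x, tf.contrib no longer exists, so here is a minimal sketch of the adjustments, assuming the graph-mode style is kept through the compat shim:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # keep the placeholder/Session workflow

# tf.contrib.layers.l2_regularizer(reg)(w) computed reg * sum(w**2) / 2,
# which is exactly reg * tf.nn.l2_loss(w):
def l2_penalty(w, reg):
    return reg * tf.nn.l2_loss(w)

The rest of the graph code resolves unchanged under the compat namespace (tf.placeholder, tf.random_normal, tf.train.GradientDescentOptimizer, tf.summary.FileWriter). Once training has run, the loss and accuracy curves can be viewed with tensorboard --logdir /tmp/tensorflow.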