import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Data preparation: 100 evenly spaced samples on [-1, 1] with
# targets y ≈ 2x plus uniform noise drawn from [0, 0.4).
np.random.seed(5)  # fixed seed so the run is reproducible
x_data = np.linspace(-1, 1, 100)
y_data = x_data * 2 + np.random.rand(*x_data.shape) * 0.4
# Show the raw samples on a scatter plot.
plt.figure()
plt.scatter(x_data, y_data)
# Build the model graph: prediction = x * w + b.
# Placeholders are fed one scalar (x, y) sample per training step.
x = tf.placeholder('float', name='x')
y = tf.placeholder('float', name='y')
# Trainable parameters: slope w (init 1.0) and intercept b (init 0.0).
w = tf.Variable(1.0, name='w0')
b = tf.Variable(0.0, name='b0')
def model(x, w, b):
    """Linear model: return the element-wise prediction x * w + b."""
    # Original had the return statement at column 0 (IndentationError);
    # re-indented into the function body.
    return tf.multiply(x, w) + b
# Prediction op for the linear model (renamed from the typo "preb").
pred = model(x, w, b)
# Hyperparameters.
train_epochs = 10     # number of passes over the whole data set
learning_rate = 0.05  # gradient-descent step size
# Mean-squared-error loss between targets and predictions.
loss_function = tf.reduce_mean(tf.square(y - pred))
# One SGD update per run of this op.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)
step = 0              # global step counter, incremented per sample
# NOTE(review): misspelled name kept because the training loop below reads it.
disply_step = 10      # print the loss once every this many steps
loss_list = []        # per-step loss history for the final plot
# Run the training. A context manager guarantees the session is closed
# (the original created a Session and never released it).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Repeat for the configured number of epochs.
    for epoch in range(train_epochs):
        # Feed one (x, y) sample at a time — per-sample SGD.
        for xs, ys in zip(x_data, y_data):
            _, loss = sess.run([optimizer, loss_function],
                               feed_dict={x: xs, y: ys})
            loss_list.append(loss)
            step = step + 1
            # Print the loss at the configured granularity.
            if step % disply_step == 0:
                print(loss)
        # After each epoch, read the current parameters and draw the
        # fitted line over the scatter plot.
        b0temp = b.eval(session=sess)
        w0temp = w.eval(session=sess)
        plt.plot(x_data, x_data * w0temp + b0temp)
# Plot the full loss history.
plt.figure()
plt.plot(loss_list, 'r+')
plt.show()
# The complete code is shown above.