線性迴歸模型
by - YH
Date - 20/11/2019
-
從今天開始我的機器學習之路,以後會不定期的更新,敬請大家期待!
本文主要運用TensorFlow實現了機器學習中的線性迴歸模型,這裏是在Jupyter Notebook上面實現的。
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
# Fix the RNG seed so every run draws exactly the same noise.
np.random.seed(28)

N = 100  # number of training samples

# Inputs: an evenly spaced grid on [0, 6] perturbed by Gaussian noise.
x_noise = np.random.normal(loc=0.0, scale=2, size=N)
train_x = np.linspace(0, 6, N) + x_noise

# Targets: the true line y = 14x - 7 plus Gaussian observation noise.
y_noise = np.random.normal(loc=0.0, scale=5.0, size=N)
train_y = 14 * train_x - 7 + y_noise
# Visualise the raw (x, y) training samples before fitting the model.
plt.scatter(train_x,train_y)
plt.show()
def print_info(r_w, r_b, r_loss):
    """Print the current weight, bias and loss on one line."""
    print(f'w={r_w},b={r_b},loss={r_loss}')
開啓會話,運行計算圖,訓練模型
# Build the computation graph (TensorFlow 1.x graph-mode API).
with tf.Graph().as_default():
    with tf.name_scope('Input'):
        # Input placeholders fed with the full training set each step.
        X = tf.placeholder(tf.float32, name='X')
        Y_true = tf.placeholder(tf.float32, name='Y_true')
    with tf.name_scope('Inference'):
        # Model parameters, both initialised to zero.
        w = tf.Variable(tf.zeros([1]), name='weight')
        b = tf.Variable(tf.zeros([1]), name='bias')
        y_pred = tf.add(tf.multiply(X, w), b)  # y = w*x + b
    with tf.name_scope('Loss'):
        # Mean-squared-error loss.
        TrainLoss = tf.reduce_mean(tf.square(Y_true - y_pred))
    with tf.name_scope('Train'):
        # Plain gradient descent on the MSE loss.
        Optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05)
        TrainOp = Optimizer.minimize(TrainLoss)
    # Initialiser node for all Variable-type nodes.
    init_op = tf.global_variables_initializer()
    # Dump the graph so it can be inspected in TensorBoard.
    writer = tf.summary.FileWriter(logdir='logs_linear_regression',
                                   graph=tf.get_default_graph())
    writer.close()
    # Open a session, run the graph, train the model.
    print('開啓會話,運行計算圖,訓練模型')
    with tf.Session() as sess:
        sess.run(init_op)
        # One full-batch gradient step per iteration; fetch the updated
        # parameters and loss so progress can be printed.
        for step in range(N):
            _, train_w, train_b, train_loss = sess.run(
                [TrainOp, w, b, TrainLoss],
                feed_dict={X: train_x, Y_true: train_y})
            print_info(train_w, train_b, train_loss)
    # BUG FIX: the original plotted r_w/r_b, which are undefined names
    # (NameError). Use the last fetched parameters train_w/train_b instead.
    plt.scatter(train_x, train_y)
    plt.plot(train_x, train_w * train_x + train_b, label='Fitted Line')
    plt.legend()
    plt.show()
w=[13.829246],b=[-6.4065833],loss=29.9353084564209