神經網絡實現SinX曲線擬合/逼近.

SinX函數曲線的擬合/逼近.

說明:原代碼來自羅冬日《tensorflow實戰》書籍代碼,且該代碼改寫自Google官方案例.

  • 網絡層次結構:W 爲16維,depth爲3,也就是說合計隱藏節點48個.
  • 採用隨機梯度優化,但是原書中提示實際運用中Adam(亞當)優化效果更好:收斂速度更快.
  • 代碼在原代碼有少部分修改.
  • 運行結果如下:起始/5w次後的結果.
    00001.PNG
    00002.PNG
  • 代碼.
    # coding=utf-8
    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt
    import pylab
    
    '''
      Fit/approximate a sine function with TensorFlow.
      Code taken from: Luo Dongri - "TensorFlow in Action".
      Luo Dongri's code was adapted from an official Google example.
    '''
    # Pin work to the first GPU. Under tensorflow-gpu the default GPU is
    # used anyway, so this line can stay commented out.
    # with tf.device("/gpu:0"):
    
    # Reset the default graph on every run; otherwise re-creating
    # variables with the same names fails on repeated executions.
    tf.reset_default_graph()
    
    # GPU memory options; the fraction-based cap is reportedly imprecise.
    config = tf.ConfigProto()
    # %matplotlib qt
    # config.gpu_options.per_process_gpu_memory_fraction =  0.6 # cap at 60% of GPU memory.
    config.gpu_options.allow_growth =  True # grow GPU memory on demand.
    with tf.Session(config = config) as sess:
        def draw_correct_line():
            '''
              Plot the reference sin curve over [0, 2*pi) and a red
              horizontal line at y = 0 for visual orientation.
            '''
            xs = np.arange(0, 2 * np.pi, 0.01).reshape(-1, 1)
            pylab.plot(xs, np.sin(xs), label='standard sin:')
            plt.axhline(linewidth=1, color='r')
    
    
        def get_train_data():
            '''
              Draw a single random training pair (train_x, train_y), where
              train_x ~ Uniform[0, 2*pi) (shape (1,)) and train_y = sin(train_x).
            '''
            sample_x = np.random.uniform(0.0, 2 * np.pi, (1))
            return sample_x, np.sin(sample_x)
    
    
        def inference(input_data):
            '''
            Forward pass of the network: three sigmoid hidden layers of 16
            units each, followed by a linear output layer.

            Args:
              input_data: a single scalar x value (fed via a placeholder).
            Returns:
              The output tensor approximating sin(input_data).
            All weights and biases are initialized from N(mean=0, stddev=1).
            '''

            with tf.variable_scope('hidden1'):
                # First hidden layer, 16 units. The scalar input is broadcast
                # against the [1, 16] weight row (element-wise multiply).
                w1 = tf.get_variable("weight", [1, 16], tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                b1 = tf.get_variable("biase", [1, 16], tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                layer1 = tf.sigmoid(tf.multiply(input_data, w1) + b1)

            with tf.variable_scope('hidden2'):
                # Second hidden layer, 16 units.
                w2 = tf.get_variable("weight", [16, 16],  tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                b2 = tf.get_variable("biase", [16], tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                layer2 = tf.sigmoid(tf.matmul(layer1, w2) + b2)

            with tf.variable_scope('hidden3'):
                # Third hidden layer, 16 units.
                w3 = tf.get_variable("weight", [16, 16],  tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                b3 = tf.get_variable("biase", [16], tf.float32,
                                     initializer=tf.random_normal_initializer(0.0, 1))
                layer3 = tf.sigmoid(tf.matmul(layer2, w3) + b3)

            with tf.variable_scope('output_layer'):
                # Linear output layer: 16 -> 1, no activation.
                w_out = tf.get_variable("weight", [16, 1],  tf.float32,
                                        initializer=tf.random_normal_initializer(0.0, 1))
                b_out = tf.get_variable("biase", [1], tf.float32,
                                        initializer=tf.random_normal_initializer(0.0, 1))
                output = tf.matmul(layer3, w_out) + b_out

            return output
    
    
        def train():
            '''
              Build the graph and fit the network to sin(x) with per-sample
              SGD; every 10000 steps, plot the current fit against the true
              sine curve.
            '''
            learning_rate = 0.01
            x = tf.placeholder(tf.float32)
            y = tf.placeholder(tf.float32)

            net_out = inference(x)

            # Squared error on a single (x, sin x) sample per step.
            loss_op = tf.square(net_out - y)

            # Plain stochastic gradient descent. The book notes Adam tends
            # to converge faster in practice:
            # opt = tf.train.AdamOptimizer(learning_rate)
            opt = tf.train.GradientDescentOptimizer(learning_rate)
            train_op = opt.minimize(loss_op)

            init = tf.global_variables_initializer()

            with tf.Session() as sess:
                sess.run(init)
                print("start training....")
                # The fit is essentially converged after ~30k steps; larger
                # layers speed up fitting, but step count matters more.
                train_times = 1000000

                for step in range(train_times + 1):
                    sample_x, sample_y = get_train_data()
                    sess.run(train_op, feed_dict={x: sample_x, y: sample_y})

                    # Plot a snapshot every 10000 steps.
                    if step % 10000 == 0:
                        snapshot = step // 10000
                        grid = np.arange(0, 2 * np.pi, 0.01)
                        preds = np.zeros([len(grid)])
                        for idx, grid_x in enumerate(grid):
                            pred = sess.run(net_out, feed_dict={x: grid_x, y: 1})
                            np.put(preds, idx, pred)
                        # Reference sine first, then the network's current
                        # approximation as a dashed line.
                        draw_correct_line()
                        pylab.plot(grid, preds,
                                   '--', label=str(snapshot) + ' times')
                        pylab.show()
                print("end.")
    
    if __name__ == "__main__":
        train()
    
    
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章