Gradient Descent: Fitting a Linear Regression

Fitting a straight line with gradient descent

Implementation notes

Given a set of sample points $(x, y)$, we want to find the best-fitting line $y = ax + b$, i.e. to solve for the two unknowns $a$ and $b$.

First, a plot of the given scatter points together with the fitted regression line.

In my view, the gradient optimization here comes down to one key expression, the loss $loss = \sum (ax + b - y)^2$; by minimizing this convex function we obtain the minimum loss and hence the optimal parameters of the linear regression. The details are worked out below.
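The derivatives that follow come from applying the chain rule to each squared term in the sum:

$$\frac{\partial}{\partial a}(ax + b - y)^2 = 2(ax + b - y)\cdot x, \qquad \frac{\partial}{\partial b}(ax + b - y)^2 = 2(ax + b - y)$$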

$\frac{\partial loss}{\partial a} = \sum 2x(ax + b - y)$

$\frac{\partial loss}{\partial b} = \sum 2(ax + b - y)$
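These derivatives are easy to sanity-check numerically. The following snippet is my addition; the names loss, analytic_grad and numeric_grad are illustrative and not used by the program later in this post. It compares the analytic gradient with a central finite-difference estimate at an arbitrary point:

import numpy as np

def loss(a, b, x, y):
    # summed squared error, matching loss = sum((a*x + b - y)^2)
    return np.sum((a * x + b - y) ** 2)

def analytic_grad(a, b, x, y):
    # d(loss)/d(a) = sum(2x(ax+b-y)), d(loss)/d(b) = sum(2(ax+b-y))
    residual = a * x + b - y
    return np.sum(2 * x * residual), np.sum(2 * residual)

def numeric_grad(a, b, x, y, eps=1e-6):
    # central finite differences with respect to a and b
    ga = (loss(a + eps, b, x, y) - loss(a - eps, b, x, y)) / (2 * eps)
    gb = (loss(a, b + eps, x, y) - loss(a, b - eps, x, y)) / (2 * eps)
    return ga, gb

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 2.5, 4.0])
print(analytic_grad(0.5, 1.0, x, y))  # the two results should agree
print(numeric_grad(0.5, 1.0, x, y))   # up to finite-difference error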

From these, each gradient-descent step updates $a = a - \frac{\partial loss}{\partial a} \cdot learning\_rate$

and likewise updates $b = b - \frac{\partial loss}{\partial b} \cdot learning\_rate$.

Iterating these updates drives the loss $loss$ toward its minimum, and at that point $a$ and $b$ have been updated to their optimal values. (The code below additionally divides each gradient by the number of samples; this only rescales the effective learning rate.)
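As a compact illustration of this update rule, here is a minimal vectorized sketch (my own naming, assuming NumPy arrays x and y of equal length; the full loop-based program follows below):

import numpy as np

def fit_line(x, y, learning_rate=0.001, iterations=100000):
    # Gradient descent on loss = sum((a*x + b - y)^2), with each gradient
    # averaged over the samples, as the full program below also does.
    a, b = 0.0, 0.0
    n = len(x)
    for _ in range(iterations):
        residual = a * x + b - y                            # (ax + b - y) for every sample
        a -= learning_rate * 2 * np.sum(x * residual) / n   # mean of d(loss)/d(a)
        b -= learning_rate * 2 * np.sum(residual) / n       # mean of d(loss)/d(b)
    return a, b

Calling fit_line(x1, y1) with the arrays from the listing below performs the same computation as the loop-based version and should return essentially the same a and b.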

Python code

import numpy as np
import matplotlib.pyplot as plt

x1 = np.array([1.1, 2.4, 2.4, 3.1, 2.2, 4.42, 5.43, 4.5, 5.28, 7.35, 10, 8.27, 12.6, 12.8, 9.69, 15.0, 13.69])
y1 = np.array([2.5, 1.7, 3, 4.0, 5.2, 6.53, 7.33, 8.7, 4.2, 5.8, 6.05, 8.05, 7.41, 8.89, 10.12, 9.72, 10.83])

def calc_error(a, b, data):
    # mean squared error of the line y = a*x + b over the data set
    total = 0
    for i in range(len(data)):
        x, y = data[i][0], data[i][1]
        total += (a * x + b - y) ** 2
    return total / float(len(data))

def gradient_step(now_a, now_b, data, learning_rate):
    gradient_a, gradient_b = 0, 0
    for i in range(len(data)):
        x, y = data[i][0], data[i][1]
        gradient_a += 2 * x * (now_a * x + now_b - y)  # d(loss)/d(a) for this sample
        gradient_b += 2 * (now_a * x + now_b - y)      # d(loss)/d(b) for this sample
    gradient_a /= len(data)  # average the gradients over all samples
    gradient_b /= len(data)
    new_a = now_a - learning_rate * gradient_a
    new_b = now_b - learning_rate * gradient_b
    return [new_a, new_b]

def algorithm(start_a, start_b, data, learning_rate, iterator_num):
    a, b = start_a, start_b
    for i in range(iterator_num):
        a, b = gradient_step(a, b, data, learning_rate)
    return [a, b]

def run():
    # pair each x sample with its y sample; same values as x1 and y1 above
    data = np.column_stack((x1, y1))
    a, b = 0, 0
    # for i in range(1, 6):  # vary the iteration count and compare the results
    #     iterator_num = 10 ** i
    #     print("iterator_num is {0}".format(iterator_num))
    #     print("before a:{0}, b:{1}, error:{2}".format(a, b, calc_error(a, b, data)))
    #     a, b = algorithm(a, b, data, 0.0001, iterator_num)
    #     print("after a:{0}, b:{1}, error:{2}".format(a, b, calc_error(a, b, data)))
    #     print("")
    a, b = algorithm(a, b, data, 0.001, 100000)  # a reasonably good iteration count
    print("Mine: a={0}, b={1}  Reference: a={2}, b={3}".format(a, b, 0.487713, 3.0308))
    print("")
    # for i in range(len(data)):
    #     print("Data y: {0}  My y: {1}  Reference y: {2}".format(data[i][1], a * data[i][0] + b, 0.487713 * data[i][0] + 3.0308))
    #     print("")
    return [a, b]

if __name__ == "__main__":
    plt.scatter(x1, y1, color = "red", label = "point")
    a, b = run()
    x = x1
    y = a * x + b
    plt.plot(x, y, label = "line")
    plt.legend(loc = "best")
    plt.show()
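The hard-coded reference values 0.487713 and 3.0308 appear to be the ordinary least-squares coefficients for this data. One way to reproduce such a reference yourself (my addition, not part of the original script; run it after the script, or inside run(), so that np, x1 and y1 are in scope) is NumPy's closed-form polynomial fit:

ref_a, ref_b = np.polyfit(x1, y1, 1)  # degree-1 least-squares fit: [slope, intercept]
print("polyfit reference: a={0:.6f}, b={1:.6f}".format(ref_a, ref_b))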