Machine Learning Series (4): Gradient Descent 2020.6.6

Preface

This installment covers gradient descent:

  • A search-based optimization method
  • Used to minimize a loss function

1. The principle of gradient descent and a simple implementation

The idea, step by step:

  • Compute the gradient of the loss at the current point
  • Move against the gradient, with the step size scaled by the learning rate
  • Repeat until the iterates settle at the minimum

The learning rate determines how quickly we reach the optimum (a quick experiment with different rates follows the code below):

  • Too small, and convergence is slow
  • Too large, and the iterations may fail to converge at all
  • Both the learning rate and the starting point need tuning

Implementation:

import numpy as np
import matplotlib.pyplot as plt
"""模擬梯度下降法"""
# 以一個二次函數爲損失函數
plot_x = np.linspace(-1., 6., 141)
plot_y = (plot_x-2.5)**2 - 1
# Loss function
def J(theta):
    try:
        return (theta-2.5)**2 - 1.
    except:
        return float('inf')
# Derivative
def dJ(theta):
    return 2 * (theta - 2.5)
"""
# 梯度下降法
eta = 0.1 #學習率
theta = 0.0 #起始點
epsilon = 1e-8 #判斷
theta_history = [theta]
while True:
    gradient = dJ(theta) # gradient at the current theta
    last_theta = theta
    theta = theta - eta * gradient # take one gradient-descent step
    theta_history.append(theta)
    if (abs(J(theta) - J(last_theta)) < epsilon):
        break
plt.plot(plot_x, J(plot_x))
plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
plt.show()
print(theta)
print(J(theta))"""
# Gradient descent wrapped in a function
theta_history = []
def gradient_descent(initial_theta, eta, n_iters = 1e4,epsilon=1e-8):
    theta = initial_theta
    theta_history.append(initial_theta)
    i_iter = 0
    while i_iter < n_iters:
        gradient = dJ(theta)
        last_theta = theta
        theta = theta - eta * gradient
        theta_history.append(theta)
        if (abs(J(theta) - J(last_theta)) < epsilon):
            break
        i_iter += 1
    return
def plot_theta_history():
    plt.plot(plot_x, J(plot_x))
    plt.plot(np.array(theta_history), J(np.array(theta_history)), color="r", marker='+')
    plt.show()
eta = 0.01
theta_history = []
gradient_descent(0, eta)
plot_theta_history()
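
As the list above noted, the learning rate strongly affects the behaviour. A quick experiment, sketched by reusing the functions just defined (the eta values 0.8 and 1.1 are only illustrative):

theta_history = []
gradient_descent(0, 0.8) # large but still convergent: the iterates zig-zag across the minimum
plot_theta_history()
theta_history = []
gradient_descent(0, 1.1, n_iters=10) # too large: the loss grows at every step, so cap the iterations
print(theta_history[-1]) # the last theta has moved away from the minimum at 2.5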

2. Gradient descent for linear regression

The loss function and its gradient, written out to match the code below (X_b is X with a leading column of ones, m is the number of samples), are

    J(θ) = (1/m) · Σᵢ (y⁽ⁱ⁾ − X_b⁽ⁱ⁾ · θ)²

    ∇J(θ) = (2/m) · X_bᵀ · (X_b·θ − y)

Implementation:

import numpy as np
import matplotlib.pyplot as plt
"""線性迴歸中的梯度下降"""
# 爲了可視化,搞個一維數組
np.random.seed(666) #隨機種子
x = 2 * np.random.random(size=100)
y = x * 3. + 4. + np.random.normal(size=100)
X = x.reshape(-1, 1)
# Loss function
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta))**2) / len(X_b)
    except:
        return float('inf')
# Gradient
def dJ(theta, X_b, y):
    res = np.empty(len(theta))
    res[0] = np.sum(X_b.dot(theta) - y)
    for i in range(1, len(theta)):
        res[i] = (X_b.dot(theta) - y).dot(X_b[:,i])
    return res * 2 / len(X_b)
# Gradient descent
def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
            break
        cur_iter += 1
    return theta
# Set up the inputs
X_b = np.hstack([np.ones((len(x), 1)), x.reshape(-1,1)])
initial_theta = np.zeros(X_b.shape[1])
eta = 0.01
theta = gradient_descent(X_b, y, initial_theta, eta)
print(theta)
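
The per-component loop in dJ above can be replaced by a single vectorized expression; this is the form used later in the class in section 4. A minimal sketch (the helper name dJ_vec is mine) checking that the two agree:

# Vectorized gradient: the same quantity as dJ, without the explicit loop
def dJ_vec(theta, X_b, y):
    return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)

print(np.allclose(dJ(theta, X_b, y), dJ_vec(theta, X_b, y))) # expected: True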

3. Stochastic gradient descent

Stochastic gradient descent:

  • Each step follows the gradient of a single random sample, so the direction is noisy, which can help escape local optima
  • It trades some accuracy for speed
  • The learning rate is important and should decay over the iterations
  • The decay schedule borrows the idea of simulated annealing
  • The implementation in scikit-learn is more elaborate and better optimized (a brief comparison follows the code below)

Implementation:

import numpy as np
import matplotlib.pyplot as plt
"""隨機梯度下降法"""
# 數據
m = 100000
x = np.random.normal(size=m)
X = x.reshape(-1,1)
y = 4.*x + 3. + np.random.normal(0, 3, size=m)
# Loss function
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
    except:
        return float('inf')
# Gradient for a single sample
def dJ_sgd(theta, X_b_i, y_i): # takes one row, not the whole dataset
    return 2 * X_b_i.T.dot(X_b_i.dot(theta) - y_i)
# Stochastic gradient descent
def sgd(X_b, y, initial_theta, n_iters):
    # the two hyperparameters of the learning-rate schedule
    t0, t1 = 5, 50
    # Decaying learning rate
    def learning_rate(t):
        return t0 / (t + t1)
    # Starting point
    theta = initial_theta
    for cur_iter in range(n_iters):
        rand_i = np.random.randint(len(X_b)) # pick a random sample index
        gradient = dJ_sgd(theta, X_b[rand_i], y[rand_i]) # gradient for that single sample
        theta = theta - learning_rate(cur_iter) * gradient
    return theta
X_b = np.hstack([np.ones((len(X), 1)), X])
initial_theta = np.zeros(X_b.shape[1])
theta = sgd(X_b, y, initial_theta, n_iters=m//3) # n_iters is an important hyperparameter
print(theta)
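
For comparison with the hand-rolled version, a minimal sketch using scikit-learn's SGDRegressor on the same data; its learning-rate schedule and stopping rules are more elaborate than the simple t0 / (t + t1) decay above, and max_iter=100 is only an illustrative choice:

from sklearn.linear_model import SGDRegressor

sgd_reg = SGDRegressor(max_iter=100)
sgd_reg.fit(X, y) # X, y are the arrays generated above
print(sgd_reg.intercept_, sgd_reg.coef_)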

4. Folding gradient descent into the linear regression class from the previous post

import numpy as np
from sklearn.metrics import r2_score

"""用了梯度下降法的線性迴歸函數"""
class LinearRegression:
    def __init__(self):
        """初始化Linear Regression模型"""
        self.coef_ = None
        self.intercept_ = None
        self._theta = None
    
    def fit_normal(self, X_train, y_train):
        """根據訓練數據集X_train, y_train訓練Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """根據訓練數據集X_train, y_train, 使用梯度下降法訓練Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        # Loss function
        def J(theta, X_b, y):
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(y)
            except:
                return float('inf')
        # Gradient
        def dJ(theta, X_b, y):
            return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(y) # vectorized, no per-component loop
        # Gradient descent
        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            # Parameters
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
                    break
                cur_iter += 1
            return theta
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def fit_sgd(self, X_train, y_train, n_iters=50, t0=5, t1=50):
        """根據訓練數據集X_train, y_train, 使用隨機梯度下降法訓練Linear Regression模型"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert n_iters >= 1
        # Gradient for a single sample
        def dJ_sgd(theta, X_b_i, y_i):
            return X_b_i * (X_b_i.dot(theta) - y_i) * 2
        # Stochastic gradient descent
        def sgd(X_b, y, initial_theta, n_iters=5, t0=5, t1=50): # t0, t1 are learning-rate schedule hyperparameters; n_iters counts passes over the data
            def learning_rate(t):
                return t0 / (t + t1)
            theta = initial_theta
            m = len(X_b)
            for i_iter in range(n_iters):
                # shuffle so every sample is visited once in this pass
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes,:]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(i_iter * m + i) * gradient
            return theta
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.random.randn(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self
    
    def predict(self, X_predict):
        """給定待預測數據集X_predict,返回表示X_predict的結果向量"""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return X_b.dot(self._theta)
    
    def score(self, X_test, y_test):
        """根據測試數據集 X_test 和 y_test 確定當前模型的準確度"""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)
   
    def __repr__(self):
        return "LinearRegression()"

5. Debugging gradient descent

  • Approximating the gradient numerically (a centered difference in each dimension) is accurate and easy to get right
  • But it is slow, so it is best used to verify the analytic gradient rather than for training

Implementation:

import numpy as np
import matplotlib.pyplot as plt
import datetime

"""對梯度下降法的調試"""
# 數據
np.random.seed(666)
X = np.random.random(size=(1000, 10))
true_theta = np.arange(1, 12, dtype=float) # the parameters we expect to recover
X_b = np.hstack([np.ones((len(X), 1)), X])
y = X_b.dot(true_theta) + np.random.normal(size=1000) # add noise
# Loss function
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta))**2) / len(X_b)
    except:
        return float('inf')
# Analytic gradient, as derived earlier
def dJ_math(theta, X_b, y):
    return X_b.T.dot(X_b.dot(theta) - y) * 2. / len(y)
# Numerical (debug) gradient
def dJ_debug(theta, X_b, y, epsilon=0.01):
    res = np.empty(len(theta))
    for i in range(len(theta)):
        # perturb one dimension at a time and take a centered difference
        theta_1 = theta.copy()
        theta_1[i] += epsilon
        theta_2 = theta.copy()
        theta_2[i] -= epsilon
        res[i] = (J(theta_1, X_b, y) - J(theta_2, X_b, y)) / (2 * epsilon)
    return res
# Gradient descent
def gradient_descent(dJ, X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
            break
        cur_iter += 1
    return theta
X_b = np.hstack([np.ones((len(X), 1)), X])
initial_theta = np.zeros(X_b.shape[1])
eta = 0.01
startTime = datetime.datetime.now()
theta1 = gradient_descent(dJ_debug, X_b, y, initial_theta, eta)
print(theta1)
endTime = datetime.datetime.now()
print("運行的時間是:%ss" % (endTime - startTime).seconds)
startTime = datetime.datetime.now()
theta2 = gradient_descent(dJ_math, X_b, y, initial_theta, eta)
print(theta2)
endTime = datetime.datetime.now()
print("運行的時間是:%ss" % (endTime - startTime).seconds)

Conclusion

This installment covered gradient descent,
including batch gradient descent and stochastic gradient descent,
and folded them into the linear regression class from the previous post.
Gradient descent is a good general-purpose way to find optimal parameters in many machine learning methods.

There is also mini-batch gradient descent,
which simply adds one more hyperparameter, the batch size (a sketch follows below).
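
A minimal sketch of mini-batch gradient descent under the same linear-regression setup; the function names and batch_size=16 are only illustrative:

import numpy as np

# Gradient on a mini-batch: the same vectorized formula, restricted to a subset of rows
def dJ_mb(theta, X_batch, y_batch):
    return X_batch.T.dot(X_batch.dot(theta) - y_batch) * 2. / len(y_batch)

def mini_batch_gd(X_b, y, initial_theta, eta=0.01, n_epochs=5, batch_size=16):
    theta = initial_theta
    m = len(X_b)
    for epoch in range(n_epochs):
        indexes = np.random.permutation(m) # shuffle once per pass
        for start in range(0, m, batch_size):
            batch = indexes[start:start + batch_size]
            theta = theta - eta * dJ_mb(theta, X_b[batch], y[batch])
    return theta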
