The goal of logistic regression is to find the best-fit parameters of a nonlinear function, the Sigmoid; the fitting itself can be done by an optimization algorithm. Of the optimization algorithms, gradient ascent is the most commonly used, and it can in turn be simplified to stochastic gradient ascent.
Stochastic gradient ascent performs about as well as gradient ascent while using far fewer computing resources. Moreover, stochastic gradient ascent is an online algorithm: it can update the parameters as each new sample arrives, with no need to re-read the whole data set for a batch computation.
import numpy as np
def loadDataSet():  # load the data set
    dataMat = []
    labelMat = []
    fr = open(r'F:\算法學習\機器學習書籍\機器學習實戰(Python2.7)\Ch05\testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # prepend 1.0 for the intercept; columns 1 and 2 are X1 and X2
        labelMat.append(int(lineArr[2]))  # third column is the class label
    fr.close()
    return dataMat, labelMat
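For reference, loadDataSet expects testSet.txt to contain one sample per line: two whitespace-separated feature values followed by an integer class label, lines of roughly this form (illustrative values, not necessarily the actual file contents):

-0.017612   14.053064   0
-1.395634    4.662541   1
 0.406704    7.067335   1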
def sigmoid(inX):  # the Sigmoid function
    return 1.0/(1 + np.exp(-inX))
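One caveat with this direct form: np.exp(-inX) overflows for large negative inputs and NumPy emits a RuntimeWarning, although the result still saturates at 0. If the warning is a problem, a numerically stable variant can branch on the sign of the input. This is an optional sketch for array inputs, not part of the original code:

def stableSigmoid(inX):  # hypothetical helper; the book's code uses the simple form above
    inX = np.asarray(inX, dtype=float)
    out = np.empty_like(inX)
    pos = inX >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-inX[pos]))  # exponent <= 0, cannot overflow
    expX = np.exp(inX[~pos])                    # exponent < 0, cannot overflow
    out[~pos] = expX / (1.0 + expX)
    return out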
# Gradient ascent; the counterpart of gradient descent: the former seeks a maximum, the latter a minimum
def gradAscent(dataMatIn, classLabels):
    # inputs: the training samples and their class labels
    dataMatrix = np.mat(dataMatIn)  # convert the training-sample array to matrix form
    labelMat = np.mat(classLabels).T  # label vector, transposed to a column vector for convenience; if it is already a column vector, this line can be commented out
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # maximum number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # predicted probabilities, an m x 1 column vector
        error = (labelMat - h)
        weights = weights + alpha * dataMatrix.T * error  # update step: alpha times the gradient of the log-likelihood
    return weights  # the fitted regression coefficients
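The update line is easier to trust with the gradient written out. For labels y_i in {0, 1} and h_i = sigmoid(x_i·w), the log-likelihood is ℓ(w) = Σ_i [ y_i·log(h_i) + (1 − y_i)·log(1 − h_i) ], and its gradient simplifies to ∇ℓ(w) = Xᵀ(y − h); so alpha * dataMatrix.T * error is exactly one gradient-ascent step on ℓ. A quick check at the interpreter (the exact numbers depend on the data file):

dataMat, labelMat = loadDataSet()
weights = gradAscent(dataMat, labelMat)
print(weights)  # a 3x1 matrix of coefficients [w0, w1, w2]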
# Plot the data set and the logistic-regression best-fit line
def plotBestFit(wei):
    import matplotlib.pyplot as plt
    weights = np.asarray(wei).flatten()  # accepts both the matrix from gradAscent and the 1-D arrays from the stochastic versions
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    n = np.shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]); ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]); ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s = 30, c = 'red', marker = 's')
    ax.scatter(xcord2, ycord2, s = 30, c = 'green')
    x = np.arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1]*x)/weights[2]  # decision boundary: solve 0 = w0 + w1*x1 + w2*x2 for x2
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
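plotBestFit accepts the result of any of the trainers in this section; with the batch version it is simply:

dataMat, labelMat = loadDataSet()
plotBestFit(gradAscent(dataMat, labelMat))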
def stocGradAscent0(dataMatrix, classLabels):  # stochastic gradient ascent
    m, n = np.shape(dataMatrix)
    alpha = 0.01
    weights = np.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))
        error = classLabels[i] - h  # unlike batch gradient ascent, h and error are scalars here, and no matrix conversions are needed
        weights = weights + alpha * error * dataMatrix[i]
    return weights
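Note that the stochastic versions index rows and multiply element-wise, so dataMatrix must be a NumPy array rather than a Python list or np.matrix; convert first. A minimal run:

dataMat, labelMat = loadDataSet()
weights0 = stocGradAscent0(np.array(dataMat), labelMat)
plotBestFit(weights0)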
# Improved stochastic gradient ascent
def stocGradAscent1(dataMatrix, classLabels, numIter = 150):
    m, n = np.shape(dataMatrix)
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # materialize as a list so entries can be deleted (range objects are immutable in Python 3)
        for i in range(m):
            alpha = 4/(1.0+j+i)+0.01  # alpha shrinks as iterations proceed but never reaches 0
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # pick a random position among the not-yet-used samples
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))  # as before, h and error are scalars; no matrix conversion needed
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])  # remove the index so each sample is used at most once per pass
    return weights
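A short end-to-end comparison of the two stochastic versions (numIter = 150 is the default above; results differ slightly from run to run because samples are drawn at random):

if __name__ == '__main__':
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    w0 = stocGradAscent0(dataArr, labelMat)        # a single pass over the data
    w1 = stocGradAscent1(dataArr, labelMat, 150)   # 150 randomized passes with decaying alpha
    print('one-pass SGA :', w0)
    print('improved SGA :', w1)
    plotBestFit(w1)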