# Hand-written SVM (SMO) implementation
import time
import numpy as np
import math
import random
def loadDataSet(filename):
    """Load a whitespace-separated dataset file.

    Each line must contain two feature values followed by one label.

    :param filename: path to the data file
    :return: (dataArr, labelArr) — list of [x1, x2] feature pairs and
             the matching list of float labels
    """
    dataArr, labelArr = [], []
    with open(filename, 'r') as f:
        for raw in f:
            fields = raw.split()
            dataArr.append([float(fields[0]), float(fields[1])])
            labelArr.append(float(fields[2]))
    return dataArr, labelArr
class SVM:
    """Gaussian-kernel SVM trained with the SMO algorithm.

    Equation numbers in comments refer to chapter 7 of Li Hang's
    "Statistical Learning Methods" (SMO derivation).
    """

    def __init__(self, trainDataList, trainLabelList, sigma=10, C=200, toler=0.01):
        """
        :param trainDataList: training samples, m rows of n features
        :param trainLabelList: training labels (expected in {-1, +1}), length m
        :param sigma: Gaussian kernel bandwidth
        :param C: soft-margin penalty
        :param toler: tolerance used when checking the KKT conditions
        """
        # Training data as an m x n matrix; labels as an m x 1 column vector.
        self.trainDataMat = np.mat(trainDataList)
        self.trainLabelMat = np.mat(trainLabelList).T
        self.m, self.n = np.shape(self.trainDataMat)
        self.sigma = sigma
        self.C = C
        self.toler = toler
        self.b = 0                        # bias term
        self.alpha = [0 for _ in range(self.m)]   # Lagrange multipliers
        self.E = [0 for _ in range(self.m)]       # cached prediction errors E_i
        self.supportVecIndex = []         # support-vector indices (filled by train())
        # Precompute the m x m Gaussian kernel matrix.
        self.k = self.calcKernel()

    def calcKernel(self):
        """Precompute k[i][j] = exp(-||x_i - x_j||^2 / (2*sigma^2)) for all pairs.

        :return: m x m list-of-lists kernel matrix
        """
        k = [[0 for _ in range(self.m)] for _ in range(self.m)]
        for i in range(self.m):
            X_i = self.trainDataMat[i, :]
            # The kernel is symmetric, so only the upper triangle is computed.
            for j in range(i, self.m):
                X_j = self.trainDataMat[j, :]
                # Squared Euclidean distance (1x1 matrix).
                sq_dist = (X_i - X_j) * (X_i - X_j).T
                k[i][j] = np.exp(-sq_dist / (2 * self.sigma ** 2))
                k[j][i] = k[i][j]
        return k

    def calcgxi(self, i):
        """Compute g(x_i) = sum_j alpha_j * y_j * K(x_j, x_i) + b (eq. 7.104).

        :param i: sample index
        :return: predicted decision value for sample i
        """
        g_xi = 0
        # Only non-zero multipliers contribute to the sum.
        index = [j for j, alpha in enumerate(self.alpha) if alpha != 0]
        for j in index:
            # BUGFIX: the sum must use alpha_j, y_j and K(x_j, x_i); the
            # original used alpha_i / y_i, which zeroed out every term
            # except when i itself had a non-zero alpha.
            g_xi += self.alpha[j] * self.trainLabelMat[j] * self.k[j][i]
        g_xi += self.b
        return g_xi

    def isSatisfyKKT(self, i):
        """Check whether alpha_i satisfies the KKT conditions (eqs. 7.111-7.113).

        :param i: index of the multiplier to check
        :return: True if alpha_i satisfies KKT within self.toler
        """
        g_xi = self.calcgxi(i)
        y_i = self.trainLabelMat[i]
        # alpha == 0  ->  y * g(x) >= 1
        if math.fabs(self.alpha[i]) < self.toler and y_i * g_xi >= 1:
            return True
        # 0 < alpha < C  ->  y * g(x) == 1
        elif self.alpha[i] > -self.toler and self.alpha[i] < self.C + self.toler \
                and math.fabs(y_i * g_xi - 1) < self.toler:
            return True
        # alpha == C  ->  y * g(x) <= 1
        # BUGFIX: the original accepted alpha == C unconditionally, letting
        # bound-violating multipliers skip optimization.
        elif math.fabs(self.alpha[i] - self.C) < self.toler and y_i * g_xi <= 1:
            return True
        return False

    def calcEi(self, i):
        """E_i = g(x_i) - y_i (eq. 7.105): prediction error on sample i."""
        return self.calcgxi(i) - self.trainLabelMat[i]

    def getAlphaJ(self, E1, i):
        """Choose the second SMO variable: the j maximising |E1 - E2|.

        :param E1: error of the first variable
        :param i: index of the first variable
        :return: (E2, j) — error and index of the second variable
        """
        E2 = 0
        maxDiff = -1      # best |E1 - E2| seen so far
        maxIndex = -1
        # Only samples with a non-zero cached error are candidates.
        for j in [idx for idx, Ej in enumerate(self.E) if Ej != 0]:
            E2Tmp = self.calcEi(j)
            if math.fabs(E1 - E2Tmp) > maxDiff:
                maxDiff = math.fabs(E1 - E2Tmp)
                E2 = E2Tmp
                maxIndex = j
        if maxIndex == -1:
            # No candidate: fall back to a random index different from i.
            maxIndex = i
            while maxIndex == i:
                maxIndex = int(random.uniform(0, self.m))
            E2 = self.calcEi(maxIndex)
        return E2, maxIndex

    def train(self, iter=100):
        """Run SMO until convergence or `iter` full passes over the data.

        Convergence: a complete pass in which no alpha changed means every
        multiplier satisfies the KKT conditions.

        :param iter: maximum number of passes
        """
        iterStep = 0
        parameterChanged = 1
        while (iterStep < iter) and (parameterChanged > 0):
            print('iter:%d:%d' % (iterStep, iter))
            iterStep += 1
            parameterChanged = 0
            # Outer loop: pick the first SMO variable.
            for i in range(self.m):
                # Any alpha_i violating KKT becomes the first variable.
                if self.isSatisfyKKT(i) == False:
                    E1 = self.calcEi(i)
                    # Second variable maximises |E1 - E2| (section 7.4.2).
                    E2, j = self.getAlphaJ(E1, i)
                    y1 = self.trainLabelMat[i]
                    y2 = self.trainLabelMat[j]
                    alphaOld_1 = self.alpha[i]
                    alphaOld_2 = self.alpha[j]
                    # Clipping bounds L, H depend on whether labels agree (P126).
                    if y1 != y2:
                        L = max(0, alphaOld_2 - alphaOld_1)
                        H = min(self.C, self.C + alphaOld_2 - alphaOld_1)
                    else:
                        L = max(0, alphaOld_2 + alphaOld_1 - self.C)
                        H = min(self.C, alphaOld_2 + alphaOld_1)
                    # No room to move alpha_2: skip this pair.
                    if L == H:
                        continue
                    k11 = self.k[i][i]
                    k22 = self.k[j][j]
                    k21 = self.k[j][i]
                    k12 = self.k[i][j]
                    # eta = K11 + K22 - 2*K12, denominator of eq. 7.106.
                    eta = k11 + k22 - 2 * k12
                    # BUGFIX: guard against division by zero (duplicate points).
                    if eta <= 0:
                        continue
                    # Update alpha_2 (eq. 7.106) and clip to [L, H] (eq. 7.108).
                    alphaNew_2 = alphaOld_2 + y2 * (E1 - E2) / eta
                    if alphaNew_2 < L:
                        alphaNew_2 = L
                    elif alphaNew_2 > H:
                        alphaNew_2 = H
                    # Update alpha_1 (eq. 7.109).
                    alphaNew_1 = alphaOld_1 + y1 * y2 * (alphaOld_2 - alphaNew_2)
                    # Bias candidates (eqs. 7.115, 7.116).
                    b1New = -1 * E1 - y1 * k11 * (alphaNew_1 - alphaOld_1) \
                        - y2 * k21 * (alphaNew_2 - alphaOld_2) + self.b
                    b2New = -1 * E2 - y1 * k12 * (alphaNew_1 - alphaOld_1) \
                        - y2 * k22 * (alphaNew_2 - alphaOld_2) + self.b
                    # An alpha strictly inside (0, C) pins b; otherwise average.
                    if (alphaNew_1 > 0) and (alphaNew_1 < self.C):
                        bNew = b1New
                    elif (alphaNew_2 > 0) and (alphaNew_2 < self.C):
                        bNew = b2New
                    else:
                        bNew = (b1New + b2New) / 2
                    # Commit the updates and refresh the cached errors.
                    self.alpha[i] = alphaNew_1
                    self.alpha[j] = alphaNew_2
                    self.b = bNew
                    self.E[i] = self.calcEi(i)
                    self.E[j] = self.calcEi(j)
                    # Count the update only if alpha_2 moved appreciably.
                    if math.fabs(alphaNew_2 - alphaOld_2) >= 0.00001:
                        parameterChanged += 1
                print("iter: %d i:%d, pairs changed %d" % (iterStep, i, parameterChanged))
        # After training, record the support vectors (alpha > 0).
        for i in range(self.m):
            if self.alpha[i] > 0:
                self.supportVecIndex.append(i)

    def calcSinglKernel(self, x1, x2):
        """Gaussian kernel for one pair (eq. 7.90): exp(-||x1-x2||^2/(2*sigma^2)).

        :param x1: row vector (np.mat)
        :param x2: row vector (np.mat)
        :return: kernel value (1x1 matrix)
        """
        sq_dist = (x1 - x2) * (x1 - x2).T
        # BUGFIX: the original applied np.exp a second time on the result,
        # returning exp(exp(-d/(2*sigma^2))) instead of the Gaussian kernel.
        return np.exp(-1 * sq_dist / (2 * self.sigma ** 2))

    def predict(self, x):
        """Predict the label of sample x (eq. 7.94): sign(sum_sv alpha*y*K + b).

        :param x: feature vector of the sample to classify
        :return: predicted label (+1 / -1)
        """
        result = 0
        # Only support vectors contribute; every other term has alpha == 0.
        for i in self.supportVecIndex:
            tmp = self.calcSinglKernel(self.trainDataMat[i, :], np.mat(x))
            result += self.alpha[i] * self.trainLabelMat[i] * tmp
        result += self.b
        return np.sign(result)

    def test(self, testDataList, testLabelList):
        """Evaluate the trained model on a labelled test set.

        :param testDataList: test samples
        :param testLabelList: test labels
        :return: accuracy in [0, 1]
        """
        errorCnt = 0
        for i in range(len(testDataList)):
            print('test:%d:%d' % (i, len(testDataList)))
            if self.predict(testDataList[i]) != testLabelList[i]:
                errorCnt += 1
        return 1 - errorCnt / len(testDataList)
if __name__ == "__main__":
    # Record start time for the final timing report.
    startTime = time.time()

    # Load training and test sets from whitespace-separated files.
    print("Loading Train Data...")
    trainDataList, trainLabelList = loadDataSet("traindata.txt")
    print("Loading Test Data...")
    testDataList, testLabelList = loadDataSet("testdata.txt")

    # Build the SVM (sigma=1.3, C=200, toler=0.01) and train it with SMO.
    print("start init svm...")
    svm = SVM(trainDataList, trainLabelList, 1.3, 200, 0.01)
    print('start to train')
    svm.train()

    # Evaluate on the held-out test set.
    print('start to test')
    accuracy = svm.test(testDataList, testLabelList)
    # BUGFIX: '%d' truncated the percentage (e.g. 99.8 -> 99); use %.2f.
    print('the accuracy is:%.2f%%' % (accuracy * 100))

    # Report elapsed wall-clock time.
    print('time span:', time.time() - startTime)