作業及代碼:https://pan.baidu.com/s/1L-Tbo3flzKplAof3fFdD1w 密碼:oin0
本次作業的理論部分:吳恩達機器學習(七)支持向量機
編程環境:Jupyter Notebook
1. 線性 SVM
任務
觀察懲罰項係數 C 對決策邊界的影響,數據集:data/ex6data1.mat
在理論部分,我們得到SVM的代價函數爲:$\min_{\theta}\; C\sum_{i=1}^{m}\left[y^{(i)}\mathrm{cost}_1(\theta^T x^{(i)})+(1-y^{(i)})\mathrm{cost}_0(\theta^T x^{(i)})\right]+\frac{1}{2}\sum_{j=1}^{n}\theta_j^2$
其中C爲誤差項懲罰係數,C越大,容錯率越低,越易過擬合。
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
data = sio.loadmat('./data/ex6data1.mat')
X, y = data['X'], data['y']

def plot_data():
    """Scatter-plot the raw ex6data1 samples, colored by their 0/1 label."""
    labels = y.flatten()
    plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='jet')
    plt.xlabel('x1')
    plt.ylabel('y1')

plot_data()  # draw the raw data
由圖可知,左上角的那個數據點爲異常點(誤差點)。
Scikit-learn ,kernel=‘linear’
簡稱 sklearn,參考官方中文文檔:http://sklearn.apachecn.org
提供了很多機器學習的庫,本次作業主要也是用它來解決SVM的問題
- C = 1
# C = 1: mild misclassification penalty — the outlier is tolerated
from sklearn.svm import SVC

svc1 = SVC(kernel='linear', C=1)  # linear kernel; C is the error-penalty coefficient
svc1.fit(X, y.flatten())          # train on the full dataset
>>> svc1.score(X,y.flatten()) #分類器的準確率
> 0.9803921568627451
# 繪製決策邊界
def plot_boundary(model):
    """Draw the model's decision boundary over the ex6data1 region."""
    axis_x = np.linspace(-0.5, 4.5, 500)
    axis_y = np.linspace(1.3, 5, 500)
    xx, yy = np.meshgrid(axis_x, axis_y)
    grid = np.c_[xx.flatten(), yy.flatten()]
    preds = model.predict(grid)
    plt.contour(xx, yy, preds.reshape(xx.shape))

plot_boundary(svc1)  # boundary for C = 1
plot_data()          # overlay the raw samples
- C = 100
# C = 100: heavy misclassification penalty — the boundary bends toward the outlier
svc100 = SVC(kernel='linear', C=100)
svc100.fit(X, y.flatten())
>>>svc100.score(X,y.flatten())
> 1.0
# draw the decision boundary for C = 100, then overlay the raw samples
plot_boundary(svc100)
plot_data()
結論
誤差項懲罰係數C越大,容錯率越低,越易過擬合。
2. 非線性 SVM
任務
使用高斯核函數解決線性不可分問題,並觀察 $\sigma$ 取值對模型複雜度的影響。數據集:data/ex6data2.mat
高斯核函數公式:$K(x, l^{(i)}) = \exp\left(-\dfrac{\lVert x - l^{(i)}\rVert^2}{2\sigma^2}\right)$
data = sio.loadmat('./data/ex6data2.mat')
X, y = data['X'], data['y']

def plot_data():
    """Scatter the ex6data2 samples (linearly inseparable), colored by label."""
    label = y.flatten()
    plt.scatter(X[:, 0], X[:, 1], c=label, cmap='jet')
    plt.xlabel('x1')
    plt.ylabel('y1')

plot_data()  # visualize the raw data
Scikit-learn ,kernel=‘rbf’
- 注意:理論中的高斯核參數 $\sigma$ 在 sklearn 中用 gamma 表示($\gamma = \frac{1}{2\sigma^2}$),高斯核對應的 kernel 參數取值爲 rbf
# Gaussian (RBF) kernel with gamma = 1; C is the error-penalty coefficient
svc1 = SVC(kernel='rbf', gamma=1, C=1)
svc1.fit(X, y.flatten())  # train on the full dataset
>>> svc1.score(X,y.flatten()) #分類器的準確率
> 0.8088064889918888
# draw the decision boundary
def plot_boundary(model):
    """Plot the decision boundary on the ex6data2 region [0,1] x [0.4,1]."""
    axis_x = np.linspace(0, 1, 500)
    axis_y = np.linspace(0.4, 1, 500)
    xx, yy = np.meshgrid(axis_x, axis_y)
    preds = model.predict(np.c_[xx.flatten(), yy.flatten()])
    plt.contour(xx, yy, preds.reshape(xx.shape))

plot_boundary(svc1)
plot_data()
結論
$\gamma$ 值越大(即 $\sigma$ 越小),模型複雜度越高,同時也越易過擬合
$\gamma$ 值越小(即 $\sigma$ 越大),模型複雜度越低,同時也越易欠擬合
3. 尋找最優參數 C 和 $\gamma$
數據集:data/ex6data3.mat
mat = sio.loadmat('data/ex6data3.mat')
X, y = mat['X'], mat['y']              # training set
Xval, yval = mat['Xval'], mat['yval']  # validation set

def plot_data():
    """Scatter the ex6data3 training samples, colored by label."""
    plt.scatter(X[:, 0], X[:, 1], c=y.flatten(), cmap='jet')
    plt.xlabel('x1')
    plt.ylabel('y1')

plot_data()  # draw the raw training data
# candidate values for C and gamma (the listing order decides tie-breaks)
Cvalues = [3, 10, 30, 100, 0.01, 0.03, 0.1, 0.3, 1]  # 9 candidates
gammas = [1, 3, 10, 30, 100, 0.01, 0.03, 0.1, 0.3]   # 9 candidates

# grid-search every (C, gamma) pair; keep the first pair that reaches the
# best validation accuracy (a tie keeps the earlier candidate)
best_score = 0
best_params = (0, 0)
for c in Cvalues:
    for gamma in gammas:
        model = SVC(C=c, kernel='rbf', gamma=gamma)
        model.fit(X, y.flatten())                 # fit on the training set
        acc = model.score(Xval, yval.flatten())   # evaluate on the validation set
        if acc > best_score:
            best_score = acc
            best_params = (c, gamma)
>>> print(best_score,best_params)
> 0.965 (3, 30)
注意:獲取到的最優參數組合不只有一組,更改候選值的順序,最佳參數組合及其對應的決策邊界也會改變
# refit a model with the best parameters found above
svc2 = SVC(C=3, kernel='rbf', gamma=30)
# BUG FIX: the model must be fitted before plot_boundary() calls predict();
# the original never called fit(), which raises NotFittedError
svc2.fit(X, y.flatten())

def plot_boundary(model):
    """Plot the decision boundary on the ex6data3 region [-0.6,0.4] x [-0.7,0.6]."""
    x_min, x_max = -0.6, 0.4
    y_min, y_max = -0.7, 0.6
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 500),
                         np.linspace(y_min, y_max, 500))
    z = model.predict(np.c_[xx.flatten(), yy.flatten()])
    plt.contour(xx, yy, z.reshape(xx.shape))

plot_boundary(svc2)
plot_data()
4. 垃圾郵件過濾問題
注意:data/spamTrain.mat是對郵件進行預處理後(自然語言處理)獲得的向量
# training data: preprocessed email feature matrix and labels
train_mat = sio.loadmat('data/spamTrain.mat')
X, y = train_mat['X'], train_mat['y']
# testing data
test_mat = sio.loadmat('data/spamTest.mat')
Xtest, ytest = test_mat['Xtest'], test_mat['ytest']
>>> X.shape,y.shape # 樣本數爲4000
> ((4000, 1899), (4000, 1))
>>> X # 每一行代表一個郵件樣本,每個樣本有1899個特徵,特徵爲1表示在跟垃圾郵件有關的語義庫中找到相關單詞
> array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 1, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0]], dtype=uint8)
>>> y # 每一行代表一個郵件樣本,等於1表示爲垃圾郵件
> array([[1],
[1],
[0],
...,
[1],
[0],
[0]], dtype=uint8)
# candidate C values for the linear spam classifier
Cvalues = [3, 10, 30, 100, 0.01, 0.03, 0.1, 0.3, 1]

# search for the C with the best test-set accuracy (ties keep the earlier one)
best_score = 0
best_param = 0
for c in Cvalues:
    clf = SVC(C=c, kernel='linear')
    clf.fit(X, y.flatten())                  # fit on the training set
    acc = clf.score(Xtest, ytest.flatten())  # evaluate on the test set
    if acc > best_score:
        best_score = acc
        best_param = c
>>> print(best_score,best_param)
> 0.99 0.03
# refit with the best C found above.
# FIX: pass C by keyword — positional arguments to sklearn estimator
# constructors are deprecated since 0.25 and raise TypeError in 1.0+
svc = SVC(C=0.03, kernel='linear')
svc.fit(X, y.flatten())
score_train = svc.score(X, y.flatten())          # training-set accuracy
score_test = svc.score(Xtest, ytest.flatten())   # test-set accuracy
>>> print(score_train,score_test)
> 0.99425 0.99
附:郵件預處理
# Load a raw sample email for the preprocessing demo below.
# NOTE(review): 'sampe_email' is a typo for 'sample_email', but the name is
# reused later in the article, so it is kept as-is.
with open('data/emailSample1.txt', 'r') as f:
    sampe_email = f.read()
print(sampe_email)
'''
預處理主要包括以下9個部分:
1. 將大小寫統一成小寫字母;
2. 移除所有HTML標籤,只保留內容。
3. 將所有的網址替換爲字符串 “httpaddr”.
4. 將所有的郵箱地址替換爲 “emailaddr”
5. 將所有dollar符號($)替換爲“dollar”.
6. 將所有數字替換爲“number”
7. 將所有單詞還原爲詞源,詞幹提取
8. 移除所有非文字類型
9.去除空字符串‘’
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn import svm
import nltk.stem as ns
import re
def preprocessing(email):
    """Normalize a raw email body and return a list of stemmed word tokens.

    Steps: lowercase; strip HTML tags; map URLs / email addresses / dollar
    signs / digit runs to the placeholder words 'httpaddr' / 'emailaddr' /
    'dollar' / 'number'; split on punctuation and whitespace; strip
    non-alphanumeric characters from each token; drop empty tokens; stem
    what remains with the English Snowball stemmer.
    """
    # 1. lowercase everything
    email = email.lower()
    # 2. remove HTML tags, keeping their content
    email = re.sub(r'<[^<>]>', ' ', email)
    # 3. replace URLs with the word 'httpaddr'
    email = re.sub(r'(http|https)://[^\s]*', 'httpaddr', email)
    # 4. replace email addresses with 'emailaddr'
    email = re.sub(r'[^\s]+@[^\s]+', 'emailaddr', email)
    # 5. replace dollar signs with 'dollar'
    email = re.sub(r'[\$]+', 'dollar', email)
    # 6. replace every run of digits with 'number'
    email = re.sub(r'[0-9]+', 'number', email)
    # 7. split into tokens on whitespace and punctuation
    tokens = re.split(r'[ \@\$\/\#\.\-\:\&\*\+\=\[\]\?\!\(\)\{\}\,\'\"\>\_\<\;\%]', email)
    tokenlist = []
    stemmer = ns.SnowballStemmer('english')
    for token in tokens:
        # 8. strip non-alphanumeric characters from THIS token.
        #    BUG FIX: the original applied this substitution to the whole
        #    `email` string inside the loop, so tokens such as '\n' were
        #    never cleaned (they are visible in the article's own output).
        token = re.sub(r'[^a-zA-Z0-9]', '', token)
        # 9. drop tokens that became empty after cleaning
        if not token:
            continue
        tokenlist.append(stemmer.stem(token))
    return tokenlist
# NOTE(review): this result is never used afterwards — email2FeatureVector
# below re-runs preprocessing on the raw string itself
email = preprocessing(sampe_email)
def email2VocabIndices(email, vocab):
    """Return the indices of the vocabulary words that occur in the email.

    BUG FIX: the original iterated over ``range(len(token))`` and returned
    positions within the *token list*; the caller then used those positions
    to index the vocabulary feature vector, marking the wrong features.
    The index must run over the vocabulary so that index ``i`` means
    "vocabulary word ``i`` appears in the email".
    """
    token = preprocessing(email)
    print(token)
    index = [i for i in range(len(vocab)) if vocab[i] in token]
    return index
def email2FeatureVector(email):
    """Turn an email into a 0/1 word vector of length len(vocab).

    Position i is set to 1 when vocabulary word i appears in the email,
    otherwise 0.
    """
    # local import: the article imports pandas only AFTER this definition,
    # so importing here makes the function self-contained
    import pandas as pd
    df = pd.read_table('data/vocab.txt', names=['words'])
    vocab = df.values            # (n, 1) array of vocabulary words
    vector = np.zeros(len(vocab))  # init feature vector
    vocab_indices = email2VocabIndices(email, vocab)
    print(vocab_indices)         # indices of vocabulary words present in the email
    # set the positions of the present words to 1
    for i in vocab_indices:
        vector[i] = 1
    return vector
import pandas as pd
vector = email2FeatureVector(sampe_email)
>>> print('length of vector = {}\nnum of non-zero = {}'.format(len(vector), int(vector.sum())))
> ['anyon', 'know', 'how', 'much', 'it', 'cost', 'to', 'host', 'a', 'web', 'portal', '\n', '\nwell', 'it', 'depend', 'on', 'how', 'mani', 'visitor', 'you', 're', 'expect', '\nthis', 'can', 'be', 'anywher', 'from', 'less', 'than', 'number', 'buck', 'a', 'month', 'to', 'a', 'coupl', 'of', 'dollarnumb', '\nyou', 'should', 'checkout', 'httpaddr', 'or', 'perhap', 'amazon', 'ecnumb', '\nif', 'your', 'run', 'someth', 'big', '\n\nto', 'unsubscrib', 'yourself', 'from', 'this', 'mail', 'list', 'send', 'an', 'email', 'to', '\nemailaddr\n\n']
[0, 1, 2, 3, 4, 5, 6, 7, 9, 13, 14, 15, 16, 17, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 32, 33, 35, 36, 37, 39, 41, 42, 43, 47, 48, 49, 50, 52, 53, 54, 56, 57, 58, 59, 60, 61]
length of vector = 1899
num of non-zero = 46
>>> vector.shape
> (1899,)