十二、支持向量機(SVM)
1.原理
- 尋求最優分類邊界:
正確:對大部分樣本可以正確地劃分類別。
泛化:最大化支持向量間距。
公平:與支持向量等距。
簡單:線性,直線或平面,分割超平面。 - 基於核函數的升維變換:
通過名爲核函數的特徵變換,增加新的特徵,使得低維度空間中的線性不可分問題變爲高維度空間中的線性可分問題。
2.不同核函數的分類效果
- 線性核函數:linear,不通過核函數進行維度提升,僅在原始維度空間中尋求線性分類邊界。
# svm_line.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the comma-separated dataset: all columns but the last are features,
# the last column is the integer class label.
samples, labels = [], []
with open('../../data/multiple2.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# SVM classifier with a linear kernel: no dimensionality lift, the decision
# boundary is sought directly in the original feature space.
model = svm.SVC(kernel='linear')
model.fit(train_x, train_y)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

mp.figure('SVM Linear Classification', facecolor='lightgray')
mp.title('SVM Linear Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()
- 多項式核函數:poly,通過多項式函數增加原始樣本特徵的高次方冪
x1 x2 -> y
x1 x2 x1^2 x1x2 x2^2 -> y 2次多項式升維
x1 x2 x1^2 x1x2 x2^2 x1^3 x1^2x2 x1x2^2 x2^3 -> y 3次多項式升維
# svm_poly.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the comma-separated dataset: all columns but the last are features,
# the last column is the integer class label.
samples, labels = [], []
with open('../../data/multiple2.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# SVM classifier with a cubic polynomial kernel: the kernel implicitly adds
# higher-order feature powers so a non-linear boundary can be found.
model = svm.SVC(kernel='poly', degree=3)
model.fit(train_x, train_y)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

mp.figure('SVM Polynomial Classification', facecolor='lightgray')
mp.title('SVM Polynomial Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()
- 徑向基核函數:rbf,通過高斯分佈函數增加原始樣本特徵的分佈概率
# svm_rbf.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the comma-separated dataset: all columns but the last are features,
# the last column is the integer class label.
samples, labels = [], []
with open('../../data/multiple2.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# SVM classifier with a radial-basis-function (Gaussian) kernel.
# C is the penalty for misclassified samples; gamma controls the kernel width.
model = svm.SVC(kernel='rbf', C=600, gamma=0.01)
model.fit(train_x, train_y)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

mp.figure('SVM RBF Classification', facecolor='lightgray')
mp.title('SVM RBF Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()
3.樣本類別均衡化
- ..., class_weight='balanced', ...
通過類別權重的均衡化,使所佔比例較小的樣本權重較高,而所佔比例較大的樣本權重較低,以此平均化不同類別樣本對分類模型的貢獻,提高模型性能。
# svm_bal.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the (class-imbalanced) comma-separated dataset: all columns but the
# last are features, the last column is the integer class label.
samples, labels = [], []
with open('../../data/imbalance.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# RBF-kernel SVM with class weights balanced inversely to class frequency,
# so the minority class contributes as much to the fit as the majority class.
model = svm.SVC(kernel='rbf', C=100, gamma=1,
                class_weight='balanced')
model.fit(train_x, train_y)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

mp.figure('SVM Balanced Classification', facecolor='lightgray')
mp.title('SVM Balanced Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.show()
4.置信概率
- 根據樣本與分類邊界的距離遠近,對其預測類別的可信程度進行量化,離邊界越遠的樣本,置信概率越高,反之,離邊界越近的樣本,置信概率越低。
構造model時指定參數,probability=True
model.predict_proba(輸入樣本矩陣)->置信概率矩陣
預測結果(model.predict()函數返回):
樣本1 類別1
樣本2 類別1
樣本3 類別2
置信概率矩陣:
類別1 類別2
樣本1 0.8 0.2
樣本2 0.9 0.1
樣本3 0.4 0.6
# svm_prob.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the comma-separated dataset: all columns but the last are features,
# the last column is the integer class label.
samples, labels = [], []
with open('../../data/multiple2.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# RBF-kernel SVM; probability=True enables predict_proba (class-membership
# confidence estimates) at some extra training cost.
model = svm.SVC(kernel='rbf', C=600, gamma=0.01,
                probability=True)
model.fit(train_x, train_y)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Hand-picked probe points whose predicted class and confidence we report.
prob_x = np.array([
    [2, 1.5],
    [8, 9],
    [4.8, 5.2],
    [4, 4],
    [2.5, 7],
    [7.6, 2],
    [5.4, 5.9]])
print(prob_x)
pred_prob_y = model.predict(prob_x)
print(pred_prob_y)
probs = model.predict_proba(prob_x)
print(probs)

mp.figure('SVM Confidence Probability', facecolor='lightgray')
mp.title('SVM Confidence Probability', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y, cmap='cool',
           s=70, marker='D')
# Label each probe point with its per-class confidence percentages.
for i, point in enumerate(prob_x):
    mp.annotate(
        '{}% {}%'.format(
            round(probs[i, 0] * 100, 2),
            round(probs[i, 1] * 100, 2)),
        xy=(point[0], point[1]),
        xytext=(12, -12),
        textcoords='offset points',
        horizontalalignment='left',
        verticalalignment='top',
        fontsize=9,
        bbox={'boxstyle': 'round,pad=0.6',
              'fc': 'orange', 'alpha': 0.8})
mp.show()
5.網格搜索
- ms.GridSearchCV(模型, 超參數組合列表, cv=摺疊數)
->模型對象
模型對象.fit(輸入集,輸出集)
針對超參數組合列表中的每一個超參數組合,實例化給定的模型,做cv次交叉驗證,將其中平均f1得分最高的超參數組合作爲最佳選擇,實例化模型對象。
# svm_gs.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Load the comma-separated dataset: all columns but the last are features,
# the last column is the integer class label.
samples, labels = [], []
with open('../../data/multiple2.txt', 'r') as f:
    for line in f:
        values = [float(field) for field in line.split(',')]
        samples.append(values[:-1])
        labels.append(values[-1])
x = np.array(samples)
y = np.array(labels, dtype=int)

train_x, test_x, train_y, test_y = ms.train_test_split(
    x, y, test_size=0.25, random_state=5)

# Candidate hyper-parameter grids, one dict per kernel family.
params = [
    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
    {'kernel': ['poly'], 'C': [1], 'degree': [2, 3]},
    {'kernel': ['rbf'], 'C': [1, 10, 100, 1000],
     'gamma': [1, 0.1, 0.01, 0.001]}]

# Grid search: 5-fold cross-validation per combination, then refit the best.
model = ms.GridSearchCV(
    svm.SVC(probability=True), params, cv=5)
model.fit(train_x, train_y)

# Report the mean CV score of every combination and the winner.
for param, score in zip(model.cv_results_['params'],
                        model.cv_results_['mean_test_score']):
    print(param, score)
print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)

# Dense grid over the feature plane, used to paint the decision regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)

# Evaluate on the held-out quarter of the data.
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))

# Hand-picked probe points whose predicted class and confidence we report.
prob_x = np.array([
    [2, 1.5],
    [8, 9],
    [4.8, 5.2],
    [4, 4],
    [2.5, 7],
    [7.6, 2],
    [5.4, 5.9]])
print(prob_x)
pred_prob_y = model.predict(prob_x)
print(pred_prob_y)
probs = model.predict_proba(prob_x)
print(probs)

mp.figure('Grid Search', facecolor='lightgray')
mp.title('Grid Search', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)
mp.scatter(prob_x[:, 0], prob_x[:, 1], c=pred_prob_y, cmap='cool',
           s=70, marker='D')
# Label each probe point with its per-class confidence percentages.
for i, point in enumerate(prob_x):
    mp.annotate(
        '{}% {}%'.format(
            round(probs[i, 0] * 100, 2),
            round(probs[i, 1] * 100, 2)),
        xy=(point[0], point[1]),
        xytext=(12, -12),
        textcoords='offset points',
        horizontalalignment='left',
        verticalalignment='top',
        fontsize=9,
        bbox={'boxstyle': 'round,pad=0.6',
              'fc': 'orange', 'alpha': 0.8})
mp.show()
6.事件預測
代碼:svm_evt.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
class DigitEncoder():
    """Pass-through encoder for columns whose values are numeric strings.

    Mirrors the fit_transform/transform/inverse_transform interface of
    sklearn's LabelEncoder so either encoder can be used per column.
    """

    def fit_transform(self, y):
        # Nothing to fit — numeric strings simply become integers.
        return y.astype(int)

    def transform(self, y):
        return y.astype(int)

    def inverse_transform(self, y):
        # Back to the original string representation.
        return y.astype(str)
data = []
# Binary-classification dataset:
# with open('../../data/event.txt', 'r') as f:
# Multi-class dataset:
with open('../../data/events.txt', 'r') as f:
    for line in f.readlines():
        # rstrip('\n') is newline-safe: line[:-1] would eat the last data
        # character when the final line has no trailing newline.
        data.append(line.rstrip('\n').split(','))
# Transpose to column-major and drop column index 1 (unused field).
data = np.delete(np.array(data).T, 1, 0)
encoders, x = [], []
for row in range(len(data)):
    # Numeric-string columns keep their integer value; text columns get a
    # label encoding. Both encoders share the same interface.
    if data[row][0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    if row < len(data) - 1:
        x.append(encoder.fit_transform(data[row]))
    else:
        # Last column is the target.
        y = encoder.fit_transform(data[row])
    encoders.append(encoder)
x = np.array(x).T
train_x, test_x, train_y, test_y = \
    ms.train_test_split(x, y, test_size=0.25,
                        random_state=5)
model = svm.SVC(kernel='rbf',
                class_weight='balanced')
# 3-fold cross-validated accuracy on the training split.
print(ms.cross_val_score(
    model, train_x, train_y, cv=3,
    scoring='accuracy').mean())
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# mean() gives a float accuracy directly and avoids the Python 2
# integer-division pitfall of sum() / size.
print((pred_test_y == test_y).mean())
# Encode one new observation with the fitted per-column encoders and
# predict its event label.
data = [['Tuesday', '12:30:00', '21', '23']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
print(encoders[-1].inverse_transform(pred_y))
7.交通流量預測(迴歸)
代碼:svm_trf.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
class DigitEncoder():
    """Pass-through encoder for columns whose values are numeric strings.

    Mirrors the fit_transform/transform/inverse_transform interface of
    sklearn's LabelEncoder so either encoder can be used per column.
    """

    def fit_transform(self, y):
        # Nothing to fit — numeric strings simply become integers.
        return y.astype(int)

    def transform(self, y):
        return y.astype(int)

    def inverse_transform(self, y):
        # Back to the original string representation.
        return y.astype(str)
data = []
# Regression dataset: traffic volume is the last column.
with open('../../data/traffic.txt', 'r') as f:
    for line in f.readlines():
        # rstrip('\n') is newline-safe: line[:-1] would eat the last data
        # character when the final line has no trailing newline.
        data.append(line.rstrip('\n').split(','))
data = np.array(data).T
encoders, x = [], []
for row in range(len(data)):
    # Numeric-string columns keep their integer value; text columns get a
    # label encoding. Both encoders share the same interface.
    if data[row][0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    if row < len(data) - 1:
        x.append(encoder.fit_transform(data[row]))
    else:
        # Last column is the regression target.
        y = encoder.fit_transform(data[row])
    encoders.append(encoder)
x = np.array(x).T
train_x, test_x, train_y, test_y = \
    ms.train_test_split(x, y, test_size=0.25,
                        random_state=5)
# Support-vector regressor; epsilon is the half-width of the penalty-free
# tube around the fitted function.
model = svm.SVR(kernel='rbf', C=10, epsilon=0.2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.r2_score(test_y, pred_test_y))
# Encode one new observation with the fitted per-column encoders and
# predict its traffic volume.
data = [['Tuesday', '13:35', 'San Francisco', 'yes']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
# Index the single prediction explicitly: int() on a size-1 ndarray is
# deprecated in modern NumPy.
print(int(pred_y[0]))
十三、聚類
分類 vs. 聚類
class cluster
有監督 無監督
1.樣本相似性:歐氏距離
歐幾里得
《幾何原本》
P(x1) - Q(x2): |x1-x2| = sqrt((x1-x2)^2)
P(x1,y1) - Q(x2,y2): sqrt((x1-x2)^2+(y1-y2)^2)
P(x1,y1,z1) - Q(x2,y2,z2):
sqrt((x1-x2)^2+(y1-y2)^2+(z1-z2)^2)
用兩個樣本對應特徵值之差的平方和之平方根,即歐氏距離,來表示這兩個樣本的相似性。
2.K均值算法
第一步:隨機選擇k個樣本作爲k個聚類的中心,計算每個樣本到各個聚類中心的歐氏距離,將該樣本分配到與之距離最近的聚類中心所在的類別中。
第二步:根據第一步所得到的聚類劃分,分別計算每個聚類的幾何中心,將幾何中心作爲新的聚類中心,重複第一步,直到計算所得幾何中心與聚類中心重合或接近重合爲止。
- 聚類數k必須事先已知。
藉助某些評估指標,優選最好的聚類數。 - 聚類中心的初始選擇會影響到最終聚類劃分的結果。
初始中心儘量選擇距離較遠的樣本。
# km.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

# Load the 2-D sample points (comma-separated, one point per line).
points = []
with open('../../data/multiple3.txt', 'r') as f:
    for line in f:
        points.append([float(field) for field in line.split(',')])
x = np.array(points)

# K-means with a fixed, a-priori cluster count of 4.
model = sc.KMeans(n_clusters=4)
model.fit(x)
centers = model.cluster_centers_

# Dense grid over the feature plane, used to paint the cluster regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)
pred_y = model.predict(x)

mp.figure('K-Means Cluster', facecolor='lightgray')
mp.title('K-Means Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y, cmap='brg', s=80)
mp.scatter(centers[:, 0], centers[:, 1], marker='+', c='gold',
           s=1000, linewidth=1)
mp.show()
圖像量化
# quant.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import scipy.misc as sm
import scipy.ndimage as sn
import sklearn.cluster as sc
import matplotlib.pyplot as mp


def quant(image, n_clusters):
    """Quantize a grayscale image down to n_clusters gray levels.

    K-means clusters the pixel intensities; every pixel is then replaced
    by the center of the cluster it belongs to.
    """
    pixels = image.reshape(-1, 1)
    model = sc.KMeans(n_clusters=n_clusters)
    model.fit(pixels)
    levels = model.cluster_centers_.squeeze()
    # Map each pixel's cluster label back to its cluster center value.
    return levels[model.labels_].reshape(image.shape)


# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; running this
# requires an old SciPy (or porting to imageio / matplotlib imread) —
# confirm the target environment.
original = sm.imread('../../data/lily.jpg', True)
quant4 = quant(original, 4)
quant3 = quant(original, 3)
quant2 = quant(original, 2)

mp.figure('Image Quant', facecolor='lightgray')
mp.subplot(221)
mp.title('Original', fontsize=16)
mp.axis('off')
mp.imshow(original, cmap='gray')
mp.subplot(222)
mp.title('Quant-4', fontsize=16)
mp.axis('off')
mp.imshow(quant4, cmap='gray')
mp.subplot(223)
mp.title('Quant-3', fontsize=16)
mp.axis('off')
mp.imshow(quant3, cmap='gray')
mp.subplot(224)
mp.title('Quant-2', fontsize=16)
mp.axis('off')
mp.imshow(quant2, cmap='gray')
mp.tight_layout()
mp.show()
3.均值漂移算法
首先假定樣本空間中的每個聚類均服從某種已知的概率分佈規則,然後用不同的概率密度函數擬合樣本中的統計直方圖,不斷移動密度函數的中心(均值)的位置,直到獲得最佳擬合效果爲止。這些概率密度函數的峯值點就是聚類的中心,再根據每個樣本距離各個中心的距離,選擇最近聚類中心所屬的類別作爲該樣本的類別。
- 聚類數不必事先已知,算法會自動識別出統計直方圖的中心數量。
- 聚類中心不依據於最初假定,聚類劃分的結果相對穩定。
- 樣本空間應該服從某種概率分佈規則,否則算法的準確性會大打折扣。
# shift.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

# Load the 2-D sample points (comma-separated, one point per line).
points = []
with open('../../data/multiple3.txt', 'r') as f:
    for line in f:
        points.append([float(field) for field in line.split(',')])
x = np.array(points)

# Bandwidth controls the step of each density-function adjustment.
bw = sc.estimate_bandwidth(x, n_samples=len(x), quantile=0.1)
# Mean-shift finds the cluster count by itself; bin_seeding speeds up
# the search by seeding from a discretized grid of the points.
model = sc.MeanShift(bandwidth=bw, bin_seeding=True)
model.fit(x)
centers = model.cluster_centers_

# Dense grid over the feature plane, used to paint the cluster regions.
x_lo, x_hi = x[:, 0].min() - 1, x[:, 0].max() + 1
y_lo, y_hi = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.arange(x_lo, x_hi, 0.005),
                     np.arange(y_lo, y_hi, 0.005))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
grid_y = model.predict(flat_x).reshape(grid_x[0].shape)
pred_y = model.predict(x)

mp.figure('Mean Shift Cluster', facecolor='lightgray')
mp.title('Mean Shift Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y, cmap='brg', s=80)
mp.scatter(centers[:, 0], centers[:, 1], marker='+', c='gold',
           s=1000, linewidth=1)
mp.show()
4.凝聚層次算法
首先假定每個樣本都是一個獨立的聚類,如果統計出來的聚類數大於期望的聚類數,則從每個樣本出發尋找離自己最近的另一個樣本,與之聚集,形成更大的聚類,同時令總聚類數減少,不斷重複以上過程,直到統計出來的聚類數達到期望值爲止。
- 聚類數k必須事先已知。
藉助某些評估指標,優選最好的聚類數。 - 沒有聚類中心的概念,因此只能在訓練集中劃分聚類,但不能對訓練集以外的未知樣本確定其聚類歸屬。
- 在確定被凝聚的樣本時,除了以距離作爲條件以外,還可以根據連續性來確定被聚集的樣本。
# agglo.py (spiral.py follows separately)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp

# Load the 2-D sample points (comma-separated, one point per line).
points = []
with open('../../data/multiple3.txt', 'r') as f:
    for line in f:
        points.append([float(field) for field in line.split(',')])
x = np.array(points)

# Agglomerative clustering: no cluster centers exist, so only the training
# points themselves can be labeled (fit_predict, no predict()).
model = sc.AgglomerativeClustering(n_clusters=4)
pred_y = model.fit_predict(x)

mp.figure('Agglomerative Cluster', facecolor='lightgray')
mp.title('Agglomerative Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.scatter(x[:, 0], x[:, 1], c=pred_y, cmap='brg', s=80)
mp.show()
# spiral.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import sklearn.neighbors as nb
import matplotlib.pyplot as mp

# Generate a noisy 2-D spiral of 500 points.
n_samples = 500
t = 2.5 * np.pi * (1 + 2 * np.random.rand(
    n_samples, 1))
x = 0.05 * t * np.cos(t)
y = 0.05 * t * np.sin(t)
n = 0.05 * np.random.rand(n_samples, 2)
x = np.hstack((x, y)) + n

# Agglomerative clustering without a connectivity constraint.
model_nonc = sc.AgglomerativeClustering(
    linkage='average', n_clusters=3)
pred_y_nonc = model_nonc.fit_predict(x)

# k-nearest-neighbor graph used as the connectivity constraint, so merging
# follows the continuity of the spiral rather than raw distance alone.
conn = nb.kneighbors_graph(
    x, 10, include_self=False)
model_conn = sc.AgglomerativeClustering(
    linkage='average', n_clusters=3,
    connectivity=conn)
pred_y_conn = model_conn.fit_predict(x)

mp.figure('Nonconnectivity', facecolor='lightgray')
mp.title('Nonconnectivity', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.axis('equal')
mp.scatter(x[:, 0], x[:, 1], c=pred_y_nonc, cmap='brg',
           alpha=0.5, s=60)

mp.figure('Connectivity', facecolor='lightgray')
mp.title('Connectivity', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.axis('equal')
mp.scatter(x[:, 0], x[:, 1], c=pred_y_conn, cmap='brg',
           alpha=0.5, s=60)
mp.show()