機器學習地基

3-12數據集加載和可視化

鳶尾花數據集的加載和可視化。

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import datasets

# Load the iris dataset and inspect its structure.
iris = datasets.load_iris()
print(iris.keys())
print(iris.DESCR)
print(iris.data.shape)

# Keep only the first two features so the samples can be plotted in 2-D.
X = iris.data[:, :2]
y = iris.target

# X and y are matched row-by-row: sample i has label y[i].  The boolean
# mask `y == label` therefore selects exactly the rows of that class —
# e.g. X[y == 0, 0] is column 0 of every class-0 sample.
for label, color in enumerate(("red", "blue", "green")):
    mask = y == label
    plt.scatter(X[mask, 0], X[mask, 1], color=color)
plt.show()

4-1 KNN算法

import numpy as np
from math import sqrt
from collections import Counter

# Hand-crafted training set: five samples of class 0 followed by five of class 1.
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]
             ]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)

# The query point we want to classify.
x = np.array([8.093607318, 3.365731514])

# Euclidean distance from the query point to every training sample.
distances = [sqrt(np.sum((sample - x) ** 2)) for sample in X_train]
# Indices of the training samples ordered nearest-first,
# e.g. array([8, 7, 5, 6, 9, 3, 0, 1, 4, 2]).
nearest = np.argsort(distances)

k = 6
# Labels of the k nearest neighbours, nearest first: [1, 1, 1, 1, 1, 0].
topK_y = [y_train[idx] for idx in nearest[:k]]
# Majority vote among those labels; most_common(1) yields the top entry.
votes = Counter(topK_y)
predict_y = votes.most_common(1)[0][0]
print(predict_y)

4-2封裝KNN

sklearn中的KNN

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# Same toy dataset: five class-0 samples followed by five class-1 samples.
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]
             ]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)

# sklearn expects a 2-D array of query points, hence the nested brackets.
x = np.array([[8.093607318, 3.365731514]])

# fit() returns the classifier itself, so the calls can be chained.
kNN_classifier = KNeighborsClassifier(n_neighbors=6).fit(X_train, y_train)
print(kNN_classifier.predict(x))

自己實現:

KNN.py

import numpy as np
from math import sqrt
from collections import Counter

class KNNClassifier:
    """k-nearest-neighbours classifier with Euclidean distance and majority vote."""

    def __init__(self, k):
        """Store the number of neighbours to consult; k must be at least 1."""
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None
        self._y_train = None

    def fit(self, X_train, y_train):
        """Memorize the training set (kNN has no real training phase)."""
        assert X_train.shape[0] == y_train.shape[0], "the size of X_train must be equal to the size of y_train"
        assert self.k <= X_train.shape[0], "the size of X_train must be at least k."
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Return an ndarray with one predicted label per row of X_predict."""
        assert self._X_train is not None and self._y_train is not None, "must fit before predict!"
        assert X_predict.shape[1] == self._X_train.shape[1], "the feature number of X_predict must be equal to X_train"
        return np.array([self._predict(row) for row in X_predict])

    def _predict(self, x):
        """Classify one sample by majority vote among its k nearest neighbours."""
        assert x.shape[0] == self._X_train.shape[1], "the feature number of x must be equal to X_train"
        # Euclidean distance from x to every stored training sample.
        dists = np.sqrt(np.sum((self._X_train - x) ** 2, axis=1))
        # Labels of the k closest samples, nearest first.
        neighbour_labels = [self._y_train[i] for i in np.argsort(dists)[:self.k]]
        return Counter(neighbour_labels).most_common(1)[0][0]

    def __repr__(self):
        return "KNN(k=%d)" % self.k

test.py

from KNN import KNNClassifier
import numpy as np

# Same toy data as the sklearn example: five class-0 then five class-1 samples.
X_train = np.array([[3.393533211, 2.331273381],
                    [3.110073483, 1.781539638],
                    [1.343808831, 3.368360954],
                    [3.582294042, 4.679179110],
                    [2.280362439, 2.866990263],
                    [7.423436942, 4.696522875],
                    [5.745051997, 3.533989803],
                    [9.172168622, 2.511101045],
                    [7.792783481, 3.424088941],
                    [7.939820817, 0.791637231]])
y_train = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

# Single query point, shaped (1, 2) as predict() expects a 2-D array.
x = np.array([[8.093607318, 3.365731514]])

knn_clf = KNNClassifier(k=6)
knn_clf.fit(X_train, y_train)
print(knn_clf.predict(x)[0])  # prints 1

4-3訓練數據集測試集拆分

model_selection.py

import numpy as np

def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split X and y into random train and test subsets.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,) — labels matched row-by-row with X
    test_ratio : float in [0.0, 1.0], fraction of samples held out for testing
    seed : int or None; when given, fixes the permutation for reproducibility

    Returns
    -------
    X_train, X_test, y_train, y_test
    """
    assert X.shape[0] == y.shape[0], "the size of X must be equal to the size of y"
    # Fixed message typo (was "est_ration must be valid").
    assert 0.0 <= test_ratio <= 1.0, "test_ratio must be valid"
    # `is not None` so that seed=0 is honoured — a bare `if seed:` silently
    # skipped seeding for 0, making the split non-reproducible.
    if seed is not None:
        np.random.seed(seed)
    # Shuffle the row indices, then cut off the first test_size of them.
    shuffled_indexes = np.random.permutation(len(X))
    test_size = int(len(X) * test_ratio)
    test_indexes = shuffled_indexes[:test_size]
    train_indexes = shuffled_indexes[test_size:]
    X_train = X[train_indexes]
    y_train = y[train_indexes]
    X_test = X[test_indexes]
    y_test = y[test_indexes]
    return X_train, X_test, y_train, y_test

測試自己實現的拆分訓練測試數據集

test.py

from sklearn import datasets
from model_selection import train_test_split

# Verify the hand-written splitter on the iris data (default 20% test share).
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
print(X_train.shape)  # 120 of the 150 samples stay in the training set
print(y_train.shape)

調用sklearn庫:

test.py

from sklearn import datasets
from sklearn.model_selection import train_test_split

# sklearn's splitter uses `test_size` / `random_state` instead of the
# `test_ratio` / `seed` names of the hand-written version.
iris = datasets.load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=666)
print(X_train.shape)
print(y_train.shape)

用訓練和測試集測試算法準確率

from KNN import KNNClassifier
from sklearn import datasets
from model_selection import train_test_split

# Measure the accuracy of the hand-written kNN on a held-out test split.
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)

my_knn_clf = KNNClassifier(k=3)
my_knn_clf.fit(X_train, y_train)
y_predict = my_knn_clf.predict(X_test)
print(y_predict)
print(y_test)
# Fraction of test samples predicted correctly.
print(sum(y_predict == y_test)/len(y_test))

4-4 分類準確率

使用sklearn的digit數據集

test.py

from sklearn import datasets
from model_selection import train_test_split
from KNN import KNNClassifier

# Evaluate the hand-written kNN on the handwritten-digits dataset
# (1797 samples, 64 features each — an 8x8 grey-scale image per row).
digits = datasets.load_digits()
X = digits.data
y = digits.target

# To visualise one digit:
#   image = X[0].reshape(8, 8)
#   import matplotlib; import matplotlib.pyplot as plt
#   plt.imshow(image, cmap=matplotlib.cm.binary); plt.show()

X_train, X_test, y_train, y_test = train_test_split(X, y, test_ratio=0.2)
my_knn_clf = KNNClassifier(k=3)
my_knn_clf.fit(X_train, y_train)
y_predict = my_knn_clf.predict(X_test)
print("knn算法的預測準確率:", sum(y_predict == y_test)/len(y_test))

封裝準確率方法

matrics.py

def accuracy_score(y_true, y_predict):
    """Return the fraction of entries where y_predict matches y_true."""
    assert y_true.shape[0] == y_predict.shape[0], "the size of y_true must be equal to the size of y_predict"
    # Element-wise comparison yields a boolean array; summing it counts hits.
    return (y_true == y_predict).sum() / len(y_true)

test.py

.....

# Manual accuracy computation, then the same value via the metrics helper.
print("knn算法的預測準確率:", sum(y_predict == y_test)/len(y_test))
from matrics import accuracy_score
# Both lines print the same fraction of correct predictions.
print("knn算法的預測準確率:", accuracy_score(y_test, y_predict))

將KNN算法中封裝準確率函數

KNN.py

    def score(self, X_test, y_test):
        """Return the classification accuracy of this model on the test set."""
        # Predict first, then delegate the comparison to accuracy_score.
        return accuracy_score(y_test, self.predict(X_test))

test.py

from sklearn import datasets
from model_selection import train_test_split
from KNN import KNNClassifier
from matrics import accuracy_score

# Three equivalent ways of computing the test-set accuracy.
digits = datasets.load_digits()
X = digits.data
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_ratio=0.2)

my_knn_clf = KNNClassifier(k=3)
my_knn_clf.fit(X_train, y_train)
y_predict = my_knn_clf.predict(X_test)
print("knn算法的預測準確率:", sum(y_predict == y_test)/len(y_test))
print("knn算法的預測準確率:", accuracy_score(y_test, y_predict))
print("knn算法的預測準確率:", my_knn_clf.score(X_test, y_test))

sklearn中的KNN預測算法

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Same experiment with sklearn's built-in kNN and its score() method.
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=666)

knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
print(knn_clf.score(X_test, y_test))

4-5超參數

尋找最好的K和method

import numpy as np
from sklearn import datasets

# Grid search over two hyper-parameters: the neighbour count k and the
# vote weighting scheme ("uniform" is sklearn's default for `weights`).
digits = datasets.load_digits()
X = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=666)
from sklearn.neighbors import KNeighborsClassifier

best_score, best_k, best_method = 0.0, -1, ""
for method in ["uniform", "distance"]:
    for k in range(1, 11):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights=method)
        knn_clf.fit(X_train, y_train)
        score = knn_clf.score(X_test, y_test)
        if score > best_score:
            # Remember the best-performing combination seen so far.
            best_score, best_k, best_method = score, k, method
print("best method: ", best_method)
print("best k: ", best_k)
print("best score: ", best_score)

歐拉距離和曼哈頓距離

 

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章