kNN in Practice: Classifying Iris Flowers

1. Notes

I did this in Jupyter and then exported it to Markdown. The command to export an .ipynb file to Markdown is:

```
jupyter nbconvert --to markdown xxx.ipynb
```

2. The Problem

The Iris dataset is a classic in pattern recognition. It contains 150 records; the first four columns are the four attributes used to identify an iris (sepal length, sepal width, petal length, petal width), and the fifth column is the species (one of Setosa, Versicolour, Virginica).
In other words, the task is to identify the species of an iris from the measurements of its sepal length, sepal width, petal length, and petal width.
The dataset can be downloaded from the UCI repository at http://archive.ics.uci.edu/ml/datasets/Iris.
It can also be imported directly from the sklearn package's datasets module: from sklearn.datasets import load_iris.
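For instance, a minimal sketch of loading the data via sklearn (assuming scikit-learn is installed; the printed values are standard properties of the bundled dataset):

```python
from sklearn.datasets import load_iris

iris = load_iris()
print(iris.data.shape)    # (150, 4): the four measurement columns
print(iris.target_names)  # ['setosa' 'versicolor' 'virginica']
```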


3. Practice

(The original post shows the notebook run here as a series of screenshots; the full source code follows in Section 4.)

4. Source Code

```python
import pandas as pd
# http://archive.ics.uci.edu/ml/datasets/Iris
# from sklearn import datasets
# iris = datasets.load_iris()

iris_data = pd.read_csv('iris.csv')
iris_data.head()
```
```
   No  Sepal.Length  Sepal.Width  Petal.Length  Petal.Width Species
0   1           5.1          3.5           1.4          0.2  setosa
1   2           4.9          3.0           1.4          0.2  setosa
2   3           4.7          3.2           1.3          0.2  setosa
3   4           4.6          3.1           1.5          0.2  setosa
4   5           5.0          3.6           1.4          0.2  setosa
```

```python
type(iris_data)
```

```
pandas.core.frame.DataFrame
```
```python
# drop the index column
iris_data_01 = iris_data.drop('No', axis=1)
iris_data_01.head()
```
```
   Sepal.Length  Sepal.Width  Petal.Length  Petal.Width Species
0           5.1          3.5           1.4          0.2  setosa
1           4.9          3.0           1.4          0.2  setosa
2           4.7          3.2           1.3          0.2  setosa
3           4.6          3.1           1.5          0.2  setosa
4           5.0          3.6           1.4          0.2  setosa
```
```python
# rename the columns
iris_data_01.columns = ['sepal_lengh_cm', 'sepal_width_cm', 'petal_length_cm', 'petal_width_cm', 'class']
iris_data_01.head()
```
```
   sepal_lengh_cm  sepal_width_cm  petal_length_cm  petal_width_cm   class
0             5.1             3.5              1.4             0.2  setosa
1             4.9             3.0              1.4             0.2  setosa
2             4.7             3.2              1.3             0.2  setosa
3             4.6             3.1              1.5             0.2  setosa
4             5.0             3.6              1.4             0.2  setosa
```
```python
iris_data_01.describe()
```
```
       sepal_lengh_cm  sepal_width_cm  petal_length_cm  petal_width_cm
count      150.000000      150.000000       150.000000      150.000000
mean         5.843333        3.057333         3.758000        1.199333
std          0.828066        0.435866         1.765298        0.762238
min          4.300000        2.000000         1.000000        0.100000
25%          5.100000        2.800000         1.600000        0.300000
50%          5.800000        3.000000         4.350000        1.300000
75%          6.400000        3.300000         5.100000        1.800000
max          7.900000        4.400000         6.900000        2.500000
```
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline

# visualize the data: one scatter colour per category of `feat`
def scatter_plot_by_category(feat, x, y):
    alpha = 0.5
    gs = iris_data_01.groupby(feat)
    cs = cm.rainbow(np.linspace(0, 1, len(gs)))
    for g, c in zip(gs, cs):
        # g is a (name, group) pair; g[1] is the group's DataFrame
        plt.scatter(g[1][x], g[1][y], color=c, alpha=alpha)

plt.figure(figsize=(20, 5))
plt.subplot(131)
scatter_plot_by_category('class', 'sepal_lengh_cm', 'petal_length_cm')
plt.xlabel('sepal_len')
plt.ylabel('petal_len')
plt.title('class')
```

```
Text(0.5, 1.0, 'class')
```

(Figure: scatter plot of sepal length vs. petal length, one colour per class.)

```python
import seaborn as sb

plt.figure(figsize=(20, 10))
for column_index, column in enumerate(iris_data_01.columns):
    if column == 'class':
        continue
    plt.subplot(2, 2, column_index + 1)
    sb.violinplot(x='class', y=column, data=iris_data_01)
```

(Figure: violin plots of each of the four features, grouped by class.)

```python
# first split the data into a training set and a test set
from sklearn.model_selection import train_test_split  # module for splitting datasets

all_inputs = iris_data_01[['sepal_lengh_cm', 'sepal_width_cm', 'petal_length_cm', 'petal_width_cm']].values
all_classes = iris_data_01['class'].values

# split off the training set
(X_train,
 X_test,
 Y_train,
 Y_test) = train_test_split(all_inputs, all_classes, train_size=0.8, random_state=1)
```

Notes on the train_test_split arguments:

- the first positional argument is the sample feature array to be split;
- the second positional argument is the corresponding sample labels;
- test_size: as a float, the proportion of samples placed in the test set (default: 0.25); as an int, the absolute number of test samples. train_size, used above, works the same way for the training set;
- random_state: the seed for the random number generator, which makes the split reproducible.
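A small sketch of the float vs. int semantics of test_size (toy arrays, purely illustrative, not from the original post):

```python
from sklearn.model_selection import train_test_split
import numpy as np

X = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
y = np.arange(10)

# float: 30% of the samples go to the test set -> 3 samples
_, X_test_a, _, _ = train_test_split(X, y, test_size=0.3, random_state=1)
# int: exactly 3 samples go to the test set
_, X_test_b, _, _ = train_test_split(X, y, test_size=3, random_state=1)
print(len(X_test_a), len(X_test_b))  # 3 3
```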

```python
X_train[:10]
```

```
array([[6.1, 3. , 4.6, 1.4],
       [7.7, 3. , 6.1, 2.3],
       [5.6, 2.5, 3.9, 1.1],
       [6.4, 2.8, 5.6, 2.1],
       [5.8, 2.8, 5.1, 2.4],
       [5.3, 3.7, 1.5, 0.2],
       [5.5, 2.3, 4. , 1.3],
       [5.2, 3.4, 1.4, 0.2],
       [6.5, 2.8, 4.6, 1.5],
       [6.7, 2.5, 5.8, 1.8]])
```
```python
# train a decision tree for comparison
from sklearn.tree import DecisionTreeClassifier

# create a decision tree object
decision_tree_classifier = DecisionTreeClassifier()
# train the model
model = decision_tree_classifier.fit(X_train, Y_train)
# accuracy of the resulting model on the test set
decision_tree_classifier.score(X_test, Y_test)
```

```
0.9666666666666667
```
```python
X_test[0:3]
```

```
array([[5.8, 4. , 1.2, 0.2],
       [5.1, 2.5, 3. , 1.1],
       [6.6, 3. , 4.4, 1.4]])
```

```python
Y_test[0:3]
```

```
array(['setosa', 'versicolor', 'versicolor'], dtype=object)
```

```python
# quick prediction on the first three test samples
model.predict(X_test[0:3])
```

```
array(['setosa', 'versicolor', 'versicolor'], dtype=object)
```
```python
# now with the kNN algorithm
from sklearn import neighbors
from sklearn import datasets  # the raw iris data is also available here

# create a kNN classifier
knn = neighbors.KNeighborsClassifier()
# fit the model
knn.fit(X_train, Y_train)
```

```
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
                     metric_params=None, n_jobs=None, n_neighbors=5, p=2,
                     weights='uniform')
```

```python
# try a prediction
knn.predict([[0.1, 0.2, 0.3, 0.4]])
```

```
array(['setosa'], dtype=object)
```

```python
# check the prediction accuracy on the test set
knn.score(X_test, Y_test)
```

```
1.0
```
```python
# a from-scratch kNN implementation
# see https://www.cnblogs.com/jyroy/p/9427977.html for details

import csv
import random
import math
import operator

# load the dataset and split it randomly into training and test sets
def loadDataset(filename, split, trainingSet=[], testSet=[]):
    with open(filename, 'r') as csvfile:
        lines = csv.reader(csvfile)
        dataset = list(lines)
        for x in range(len(dataset) - 1):
            for y in range(4):
                dataset[x][y] = float(dataset[x][y])
            if random.random() < split:  # random train/test split
                trainingSet.append(dataset[x])
            else:
                testSet.append(dataset[x])

# distance between two points, over an arbitrary number of dimensions
def euclideanDistance(instance1, instance2, length):
    distance = 0
    for x in range(length):
        # Euclidean distance: d(a, b) = sqrt(sum_i (a_i - b_i)^2)
        distance += pow((instance1[x] - instance2[x]), 2)
    return math.sqrt(distance)

# find the k nearest neighbours
def getNeighbors(trainingSet, testInstance, k):
    distances = []
    length = len(testInstance) - 1
    for x in range(len(trainingSet)):
        dist = euclideanDistance(testInstance, trainingSet[x], length)
        distances.append((trainingSet[x], dist))  # distance from the test point to every training point
    distances.sort(key=operator.itemgetter(1))    # sort all the distances
    neighbors = []
    for x in range(k):  # take the k closest points
        neighbors.append(distances[x][0])
    return neighbors  # note: returning inside the loop, as in the original, would stop after one neighbour

# majority vote: the most frequent class among the k neighbours
def getResponse(neighbors):
    classVotes = {}
    for x in range(len(neighbors)):
        response = neighbors[x][-1]
        if response in classVotes:
            classVotes[response] += 1
        else:
            classVotes[response] = 1
    sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1), reverse=True)
    return sortedVotes[0][0]  # the class with the most votes

# compute the prediction accuracy
def getAccuracy(testSet, predictions):
    correct = 0
    for x in range(len(testSet)):
        if testSet[x][-1] == predictions[x]:
            correct += 1
    return (correct / float(len(testSet))) * 100.0


def main():
    # prepare data
    trainingSet = []
    testSet = []
    split = 0.67
    loadDataset(r'irisdata.txt', split, trainingSet, testSet)
    print('Trainset: ' + repr(len(trainingSet)))
    print('Testset: ' + repr(len(testSet)))
    # generate predictions
    predictions = []
    k = 3
    for x in range(len(testSet)):
        neighbors = getNeighbors(trainingSet, testSet[x], k)
        result = getResponse(neighbors)
        predictions.append(result)
        print('predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))
    print('predictions: ' + repr(predictions))
    accuracy = getAccuracy(testSet, predictions)
    print('Accuracy: ' + repr(accuracy) + '%')

if __name__ == '__main__':
    main()
```
```python
'''
See https://blog.csdn.net/Asun0204/article/details/75607948 for details.
How the k-nearest-neighbours (kNN) algorithm works: under some distance
metric, find the k training samples closest to the given query sample,
then predict from those k samples.
Classification: the query gets the most frequent class among the k points.
Regression: the prediction is usually the average of the k samples' values.
The three elements of a kNN model: the distance metric, the choice of k,
and the classification/regression decision rule (a sketch of choosing k
by cross-validation follows after this block).
'''
import numpy as np

class KNNClassfier(object):

    def __init__(self, k=5, distance='euc'):
        self.k = k
        self.distance = distance
        self.x = None
        self.y = None

    def fit(self, X, Y):
        '''
        X : array-like [n_samples, shape]
        Y : array-like [n_samples, 1]
        '''
        self.x = X
        self.y = Y

    def predict(self, X_test):
        '''
        X_test : array-like [n_samples, shape]
        output : array-like [n_samples, 1]
        '''
        output = np.zeros((X_test.shape[0], 1))
        for i in range(X_test.shape[0]):
            dis = []
            for j in range(self.x.shape[0]):
                if self.distance == 'euc':  # Euclidean distance
                    dis.append(np.linalg.norm(X_test[i] - self.x[j, :]))
            labels = []
            index = sorted(range(len(dis)), key=dis.__getitem__)  # indices sorted by distance
            for j in range(self.k):
                labels.append(self.y[index[j]])
            counts = []
            for label in labels:
                counts.append(labels.count(label))
            output[i] = labels[np.argmax(counts)]  # majority label among the k nearest
        return output

    def score(self, x, y):
        pred = self.predict(x)
        err = 0.0
        for i in range(x.shape[0]):
            if pred[i] != y[i]:
                err = err + 1
        return 1 - float(err / x.shape[0])


if __name__ == '__main__':
    from sklearn import datasets
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target
    # x = np.array([[0.5,0.4],[0.1,0.2],[0.7,0.8],[0.2,0.1],[0.4,0.6],[0.9,0.9],[1,1]]).reshape(-1,2)
    # y = np.array([0,1,0,1,0,1,1]).reshape(-1,1)
    clf = KNNClassfier(k=3)
    clf.fit(x, y)
    print('myknn score:', clf.score(x, y))
    from sklearn.neighbors import KNeighborsClassifier
    clf_sklearn = KNeighborsClassifier(n_neighbors=3)
    clf_sklearn.fit(x, y)
    print('sklearn score:', clf_sklearn.score(x, y))
    # 0.96
```
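The docstring above names the choice of k as one of kNN's three elements. A minimal sketch of one standard way to pick it, 5-fold cross-validation with sklearn (this is an addition, not part of the original post; the printed scores will vary slightly with the folds):

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()
# try a few odd k values and report mean cross-validated accuracy for each
for k in (1, 3, 5, 7, 9):
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k),
                             iris.data, iris.target, cv=5)
    print(k, round(scores.mean(), 4))
```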