《統計學習方法》 (Statistical Learning Methods), Chapter 4: Naive Bayes

# encoding:utf-8
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
import math


class NaiveBayes:
    def __init__(self):
        self.model = None

    """數學期望"""
    """@staticmethod不需要表示自身對象的self和自身類的cls參數,就跟使用函數一樣"""
    @staticmethod
    def mean(x):
        return sum(x) / float(len(x))

    """標準差(方差)"""
    def stdev(self, x):
        avg = self.mean(x)
        return math.sqrt(sum([pow(k - avg, 2) for k in x]) / float(len(x)))

    """概率密度函數"""
    def gaussian_probability(self, x, mean, stdev):
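        # Class-conditional model: the univariate Gaussian density
        #   P(x | y) = 1 / sqrt(2 * pi * stdev^2) * exp(-(x - mean)^2 / (2 * stdev^2))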
        exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
        return (1 / (math.sqrt(2 * math.pi * math.pow(stdev, 2)))) * exponent

    """處理x_train"""
    def summarize(self, train_data):
        """分別計算x_train的平均值(期望),標準差(方差)"""
        """            
            >>>a = [1,2,3]
            >>> b = [4,5,6]
            >>> c = [4,5,6,7,8]
            >>> zipped = zip(a,b)     # 打包爲元組的列表
            [(1, 4), (2, 5), (3, 6)]
            >>> zip(a,c)              # 元素個數與最短的列表一致
            [(1, 4), (2, 5), (3, 6)]
            >>> zip(*zipped)          # 與 zip 相反,*zipped 可理解爲解壓,返回二維矩陣式
            [(1, 2, 3), (4, 5, 6)]
        """
        sumaries = [(self.mean(i), self.stdev(i)) for i in zip(*train_data)]
        return sumaries

    """分類別求出數學期望和標準差"""
    def fit(self, x, y):
        """刪除y中重複地元素"""
        labels = list(set(y))
        data = {label: [] for label in labels}
        for f, label in zip(x, y):
            data[label].append(f)
        self.model = {
            label: self.summarize(value)
            for label, value in data.items()
        }
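        # self.model maps each label to a list of (mean, stdev) pairs, one per feature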
        return  "gaussianNB train done!"

    """計算概率"""
    def calculate_probabilities(self, input_data):
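        # Note: only the class-conditional likelihoods P(x_i | y) are multiplied here.
        # The class prior P(y) is omitted; the first 100 iris samples are split 50/50
        # between the two classes, so the (near-equal) priors barely affect the argmax.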
        probabilities = {}
        for label, value in self.model.items():
            probabilities[label] = 1
            for i in range(len(value)):
                mean, stdev = value[i]
                probabilities[label] *= self.gaussian_probability(input_data[i], mean, stdev)
        return probabilities

    """類別"""
    def predict(self, x_test):
        label = sorted(self.calculate_probabilities(x_test).items(),
                       key=lambda x: x[-1])[-1][0]
        return label

    def score(self, x_test, y_test):
        right = 0
        for x, y in zip(x_test, y_test):
            label = self.predict(x)
            if label == y:
                right += 1
        return right / float(len(x_test))


"""數據"""
def create_data():
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    data = np.array(df.iloc[:100, :])
    return data[:, :-1], data[:, -1]


x, y = create_data()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

model = NaiveBayes()
string = model.fit(x_train, y_train)
print(string)
print(model.predict([4.4, 3.2, 1.3, 0.2]))
print(model.score(x_test, y_test))


"""scikit-learn實例"""
clf = GaussianNB()
clf.fit(x_train, y_train)
print(clf.score(x_test, y_test))
print(clf.predict([[4.4, 3.2, 1.3, 0.2]]))
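"""For comparison, the parameters scikit-learn estimated can be inspected directly.
A minimal sketch, assuming a recent scikit-learn version: theta_ holds the per-class
feature means, var_ the per-class variances (named sigma_ before scikit-learn 1.0),
and predict_proba returns posterior class probabilities."""
print(clf.theta_)                                  # per-class feature means
print(clf.var_)                                    # per-class feature variances (sigma_ in older versions)
print(clf.predict_proba([[4.4, 3.2, 1.3, 0.2]]))   # posterior probabilities for the sample above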

 
