BP Neural Network

Reference blog post:

https://blog.csdn.net/weixin_41090915/article/details/79521161

I tidied up the code from that post: it trains a small two-layer network (a ReLU hidden layer followed by a softmax output) with full-batch gradient descent on a synthetic spiral dataset.
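
For reference, the loss that the logloss method prints below is (as I read the code; the post never writes it out) the average softmax cross-entropy plus an L2 penalty on both weight matrices:

L = -\frac{1}{N}\sum_{i=1}^{N} \log p_{i,y_i} + \frac{\mathrm{reg}}{2}\left(\lVert W\rVert_2^2 + \lVert W_2\rVert_2^2\right), \qquad p_{i,k} = \frac{e^{s_{i,k}}}{\sum_j e^{s_{i,j}}}

where N is totalnumber and s are the class scores from the forward pass.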

import numpy as np
import matplotlib.pyplot as plt

class Test:

    def __init__(self, classes, numberperclass, dimension, times=10000, step_size=1, hidden_number=100, reg=0.001):
        self.classes = classes 
        self.numberperclass = numberperclass 
        self.dimension = dimension 
        self.times = times
        self.step_size = step_size
        self.hidden_number = hidden_number
        self.reg = reg
        self.totalnumber = self.classes * self.numberperclass
        # small random weights and zero biases for one hidden layer plus an output layer
        self.w = 0.01 * np.random.randn(dimension, hidden_number)
        self.b = np.zeros([1, hidden_number])
        self.w2 = 0.01 * np.random.randn(hidden_number, classes)
        self.b2 = np.zeros([1, classes])
        # generate the synthetic spiral training set
        self.train_x, self.train_y = Test.randomdata(self.classes, self.numberperclass, self.dimension)


    def train(self, log=True):
        for i in range(self.times):
            # forward pass: ReLU hidden layer, then linear class scores
            hidden_layer = np.maximum(0, np.dot(self.train_x, self.w) + self.b)
            scores = np.dot(hidden_layer, self.w2) + self.b2
            # softmax, with the row max subtracted for numerical stability
            exp_scores = np.exp(scores - np.max(scores, axis=1, keepdims=True))
            probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

            self.backpropagate(probs, hidden_layer)
            if log:
                self.logloss(probs, i)


    def backpropagate(self, probs, hidden_layer):
        # gradient on the scores: for softmax + cross-entropy this is
        # probs with 1 subtracted at the correct class, averaged over the set;
        # work on a copy so the caller's probs stay intact for loss logging
        dscores = probs.copy()
        dscores[range(self.totalnumber), self.train_y] -= 1
        dscores /= self.totalnumber
        # backprop into the output layer's weights and bias
        dW2 = np.dot(hidden_layer.T, dscores)
        db2 = np.sum(dscores, axis=0, keepdims=True)
        # backprop into the hidden layer; kill gradients where the ReLU was inactive
        dhidden = np.dot(dscores, self.w2.T)
        dhidden[hidden_layer <= 0] = 0
        dW = np.dot(self.train_x.T, dhidden)
        db = np.sum(dhidden, axis=0, keepdims=True)
        # add the gradient of the L2 regularization term
        dW2 += self.reg * self.w2
        dW += self.reg * self.w
        # vanilla gradient-descent parameter update
        self.w += -self.step_size * dW
        self.b += -self.step_size * db
        self.w2 += -self.step_size * dW2
        self.b2 += -self.step_size * db2
        

    @staticmethod
    def randomdata(classes, numberperclass, dimension):
        # build the classic "spiral" toy dataset: one noisy arm per class
        # (note: np.c_ below produces two columns, so this assumes dimension == 2)
        x = np.zeros((classes * numberperclass, dimension))
        y = np.zeros(classes * numberperclass, dtype='uint8')
        for j in range(classes):
            ix = list(range(numberperclass * j, numberperclass * (j + 1)))
            r = np.linspace(0.0, 1, numberperclass)  # radius
            t = np.linspace(j * 4, (j + 1) * 4, numberperclass) + np.random.randn(numberperclass) * 0.2  # angle plus noise
            x[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
            y[ix] = j
        return x, y

    def logloss(self, probability, times):
        # print the full loss every 1000 iterations
        if times % 1000 != 0:
            return
        # average cross-entropy of the probability assigned to the correct class
        dataloss = -np.log(probability[range(self.totalnumber), self.train_y])
        dataloss = dataloss.sum() / self.totalnumber
        # L2 regularization term, matching the gradients added in backpropagate
        regloss = 0.5 * self.reg * (np.sum(self.w * self.w) + np.sum(self.w2 * self.w2))
        print("%s loss: %s" % (times, dataloss + regloss))

    def show(self):
        # one more forward pass to get predicted classes, then scatter-plot them
        hidden_layer = np.maximum(0, np.dot(self.train_x, self.w) + self.b)
        scores = np.dot(hidden_layer, self.w2) + self.b2
        y = np.argmax(scores, axis=1)
        print(y)
        plt.scatter(self.train_x[:, 0], self.train_x[:, 1], c=y, s=40, cmap=plt.cm.gist_rainbow)
        plt.show()

if __name__ == '__main__':
    t = Test(3, 100, 2)  # 3 classes, 100 points per class, 2-D inputs
    t.train()
    t.show()
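
As a quick sanity check (my addition; it is not in the referenced post), a few lines appended to the __main__ block report training accuracy. On this toy spiral dataset the trained network should classify the large majority of training points correctly:

    # evaluate training accuracy with one more forward pass
    hidden_layer = np.maximum(0, np.dot(t.train_x, t.w) + t.b)
    predicted = np.argmax(np.dot(hidden_layer, t.w2) + t.b2, axis=1)
    print('training accuracy: %.2f' % np.mean(predicted == t.train_y))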
