A Python Implementation of a BP (Back-Propagation) Neural Network with a Momentum Term

# Back-Propagation Neural Networks
# 

import math
import random

random.seed(0)

# calculate a random number where:  a <= rand < b
def rand(a, b):
    return (b-a)*random.random() + a
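# e.g. rand(-0.2, 0.2) draws a value uniformly from the half-open interval [-0.2, 0.2)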

# Make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
    m = []
    for i in range(I):
        m.append([fill]*J)
    return m

# our sigmoid function; tanh is a little nicer than the standard 1/(1+e^-x)
# use the hyperbolic tangent in place of the logistic function
def sigmoid(x):
    return math.tanh(x)

# derivative of our sigmoid function, in terms of the output (i.e. y)
# the derivative of tanh, used when computing the error terms for the
# output and hidden layers
def dsigmoid(y):
    return 1.0 - y**2
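# note: since d/dx tanh(x) = 1 - tanh(x)**2, the derivative can be evaluated
# from the unit's output alone: for y = tanh(x), dy/dx = 1 - y**2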

class NN:
    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        # (i.e. a three-layer network)
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no

        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no
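        # note: self.ai has ni = (inputs + 1) slots; the extra slot keeps its
        # initial value of 1.0 throughout and acts as the bias input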
        
        # create weights
        # build the weight matrices: every input node connects to every
        # hidden node, and every hidden node connects to every output node
        # size: self.ni x self.nh
        self.wi = makeMatrix(self.ni, self.nh)
        # size: self.nh x self.no
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values:
        # input-to-hidden weights in [-0.2, 0.2), hidden-to-output in [-2.0, 2.0)
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-0.2, 0.2)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)

        # last change in weights, kept for the momentum term
        # self.ci and self.co store the previous weight changes
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def update(self, inputs):
        if len(inputs) != self.ni-1:
            raise ValueError('wrong number of inputs')

        # input activations
        # the input layer uses the identity activation, y = x
        for i in range(self.ni-1):
            #self.ai[i] = sigmoid(inputs[i])
            self.ai[i] = inputs[i]

        # hidden activations
        # weighted sum of the inputs, then squash with tanh
        for j in range(self.nh):
            total = 0.0
            for i in range(self.ni):
                # total is the 'net' quantity in the ML book
                total = total + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(total)

        # output activations
        # weighted sum of the hidden activations, then squash with tanh
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total = total + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(total)

        return self.ao[:]

    # back-propagation; targets holds the sample's correct outputs
    def backPropagate(self, targets, N, M):
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')

        # calculate error terms (deltas) for the output layer
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            # the raw error (t_k - o_k)
            error = targets[k]-self.ao[k]
            # formula 4.14 in the ML book
            output_deltas[k] = dsigmoid(self.ao[k]) * error
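            # i.e. delta_k = (1 - o_k**2) * (t_k - o_k) for a tanh output unit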

        # calculate error terms for the hidden layer (formula 4.15 in the ML book)
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error
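            # i.e. delta_j = (1 - a_j**2) * sum_k(w_jk * delta_k)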

        # update the hidden-to-output weights
        # note: this implementation is BP with an added momentum term
        # N: learning rate
        # M: momentum factor; self.co stores the previous weight change
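        # as implemented: w[j][k] += N*change + M*previous_change,
        # where change = delta_k * a_j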
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print N*change, M*self.co[j][k]

        # update the input-to-hidden weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change

        # calculate the total error E(w) = 1/2 * sum_k (t_k - o_k)^2
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error

    # run the trained network over a set of patterns to check the training result
    def test(self, patterns):
        for p in patterns:
            print(p[0], '->', self.update(p[0]))

    def weights(self):
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])

    def train(self, patterns, iterations=1000, N=0.5, M=0.1):
        # N: learning rate
        # M: momentum factor
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)


def demo():
    # Teach network XOR function
    pat = [
        [[0,0], [0]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [0]]
    ]

    # create a network with two input, two hidden, and one output node
    n = NN(2, 2, 1)
    # train it with some patterns
    n.train(pat)
    # test it
    n.test(pat)



if __name__ == '__main__':
    demo()
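
As a minimal usage sketch (the pattern list and hyperparameter values below are illustrative assumptions, not part of the original code), the same NN class can be trained on other boolean functions simply by swapping the pattern list and, if desired, tuning the learning rate N and momentum factor M:

def demo_and():
    # hypothetical example: teach the network the AND function
    pat = [
        [[0,0], [0]],
        [[0,1], [0]],
        [[1,0], [0]],
        [[1,1], [1]]
    ]
    n = NN(2, 2, 1)
    # illustrative values: a slightly smaller step with less momentum
    n.train(pat, iterations=2000, N=0.3, M=0.05)
    n.test(pat)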



Output

error 0.94250
error 0.04287
error 0.00348
error 0.00164
error 0.00106
error 0.00078
error 0.00125
error 0.00053
error 0.00044
error 0.00038
([0, 0], '->', [0.03668584043139609])
([0, 1], '->', [0.9816625517128087])
([1, 0], '->', [0.9815264813097478])
([1, 1], '->', [-0.03146072993485337])
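
Note that because the output unit uses tanh, whose range is (-1, 1), the prediction for [1, 1] can come out slightly negative; the values close to 0 and 1 match the XOR targets well.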
