計算智能 -- BP神經網絡(1)

本文實現的是對張軍等編寫的《計算智能》第二章中的2.4.3應用舉例的code實現。

圖一
計算智能

圖二
應用舉例

import java.util.Scanner;

/**
 * A single-iteration BP (backpropagation) neural network, reproducing the
 * worked example in section 2.4.3 of "Computational Intelligence" (Zhang Jun et al.):
 * a 3-2-1 fully connected network with sigmoid activations.
 *
 * Flow: initialize() reads weights/biases from stdin, Forward() propagates one
 * sample, BackWard() computes the error terms, Adjustment() computes (but does
 * not apply) the updated weights and biases. Judge() (the stopping criterion)
 * is intentionally left unimplemented, as noted in the accompanying article.
 */
public class Bp {

    private int InputLayer = 3;         // number of input-layer neurons
    private int HiddenLayer = 2;        // number of hidden-layer neurons
    private int OutputLayer = 1;        // number of output-layer neurons
    private double LearningRate = 0.9;  // learning rate (eta)
    private double ExpectedValue = 1;   // expected (target) output for the sample
    double [][] Input_Hidden_weights = new double [InputLayer][HiddenLayer];    // input->hidden weights
    double [][] Hidden_Output_weights = new double [HiddenLayer][OutputLayer];  // hidden->output weights
    double [] Hidden_polarization = new double[HiddenLayer];    // hidden-layer biases
    double [] Output_polarization = new double[OutputLayer];    // output-layer biases
    double [] HiddenInputValues = new double[HiddenLayer];      // hidden-layer net inputs
    double [] HiddenOutputValues = new double[HiddenLayer];     // hidden-layer activations
    double [] OutputInputValues = new double[OutputLayer];      // output-layer net inputs
    double [] OutputOutputValues = new double[OutputLayer];     // output-layer activations
    double [] ErrorHiddenValues = new double[HiddenLayer];      // hidden-layer error terms (delta)
    double [] ErrorOutputValues = new double[OutputLayer];      // output-layer error terms (delta)
    double [][] Adjustment_Input_Hidden_weights = new double [InputLayer][HiddenLayer];  // adjusted input->hidden weights
    double [][] Adjustment_Hidden_Output_weights = new double [HiddenLayer][OutputLayer]; // adjusted hidden->output weights
    double [] Adjustment_Hidden_polarization = new double [HiddenLayer];  // adjusted hidden-layer biases
    double [] Adjustment_Output_polarization = new double [OutputLayer]; // adjusted output-layer biases


    /**
     * Step 1: read the initial network weights and biases from standard input.
     */
    void initialize(){

        Scanner in = new Scanner(System.in);

        // (1) input->hidden connection weights
        // Fixed: the prompt previously said "輸出層到隱含層" (output->hidden),
        // but this loop reads the INPUT->hidden weights.
        System.out.println("請輸入輸入層到隱含層的連接權重:");
        for (int i = 0 ; i < InputLayer ; i++ ){
            for ( int j = 0 ; j < HiddenLayer; j++ ){
                Input_Hidden_weights[i][j] = in.nextDouble();
            }
        }

        // (2) hidden->output connection weights
        System.out.println("請輸入隱含層到輸出層的連接權重:");
        for (int i = 0 ; i < HiddenLayer ; i++ ){
            for ( int j = 0 ; j < OutputLayer; j++ ){
                Hidden_Output_weights[i][j] = in.nextDouble();
            }
        }

        // (3) hidden-layer biases
        System.out.println("請輸入隱含層的偏置:");
        for (int i = 0 ; i < HiddenLayer ; i++ ){
            Hidden_polarization[i] = in.nextDouble();
        }

        // (4) output-layer biases
        System.out.println("請輸入輸出層的偏置:");
        for (int i = 0 ; i < OutputLayer ; i++ ){
            Output_polarization[i] = in.nextDouble();
        }

        System.out.println("*********************************");
    }

    /**
     * Step 2: sigmoid (logistic) activation function, 1 / (1 + e^-x).
     */
    double ActivationFunction(double x){
        return 1 / (1 + Math.exp(-x));
    }

    /**
     * Step 3: forward propagation of one training sample through the network.
     * Neuron labels in the console output are derived from the layer sizes
     * (inputs are neurons 1..InputLayer, hidden neurons follow, then outputs),
     * matching the book's numbering for the 3-2-1 example (4, 5 hidden; 6 output).
     *
     * @param TrainingSamples the input vector, length == InputLayer
     */
    void Forward( double[] TrainingSamples ){

        // Net input and activation of each hidden neuron.
        for(int i = 0; i < HiddenInputValues.length ; i++ ){
            double OutputSumTemp = 0; // weighted-sum accumulator
            for(int j = 0; j < TrainingSamples.length; j++ ){
                OutputSumTemp += TrainingSamples[j] * Input_Hidden_weights[j][i];
            }
            HiddenInputValues[i] = OutputSumTemp + Hidden_polarization[i];
            System.out.println("神經元" + (i + InputLayer + 1) + "的總輸入:");
            System.out.println(HiddenInputValues[i]);
            HiddenOutputValues[i] = ActivationFunction(HiddenInputValues[i]);
            System.out.println("神經元" + (i + InputLayer + 1) + "的總輸出:");
            System.out.println(HiddenOutputValues[i]);
        }

        // Net input and activation of each output neuron.
        for( int i = 0 ; i < OutputLayer ; i++ ){
            double OutputSumTemp = 0; // weighted-sum accumulator
            for(int j = 0; j < HiddenLayer ; j++ ){
                OutputSumTemp += HiddenOutputValues[j] * Hidden_Output_weights[j][i];
            }
            OutputInputValues[i] = OutputSumTemp + Output_polarization[i];
            System.out.println("神經元" + (i + InputLayer + HiddenLayer + 1) + "的總輸入:");
            System.out.println(OutputInputValues[i]);
            OutputOutputValues[i] = ActivationFunction(OutputInputValues[i]);
            System.out.println("神經元" + (i + InputLayer + HiddenLayer + 1) + "的總輸出:");
            System.out.println(OutputOutputValues[i]);
        }
        System.out.println("*********************************");
    }

    /**
     * Step 4: backward propagation of the error.
     * Output layer:  delta_k = O_k * (1 - O_k) * (T_k - O_k)
     * Hidden layer:  delta_j = O_j * (1 - O_j) * sum_k( delta_k * w_jk )
     */
    void BackWard(){

        // Output-layer error terms.
        for(int i = 0 ; i < OutputLayer ; i++ ){
            ErrorOutputValues[i] = OutputOutputValues[i] * ( 1 - OutputOutputValues[i]) * ( ExpectedValue - OutputOutputValues[i] );
            System.out.println("神經元" + (i + InputLayer + HiddenLayer + 1) + "的誤差:");
            System.out.println(ErrorOutputValues[i]);
        }

        // Hidden-layer error terms.
        // BUG FIX: the original assigned (=) inside the inner loop instead of
        // accumulating (+=), so only the LAST output neuron's contribution
        // survived. It happened to work here only because OutputLayer == 1.
        for(int i = HiddenLayer - 1 ; i >= 0 ; i-- ){
            double ErrorTemp = 0; // sum_k( delta_k * w_ik )
            for(int j = 0 ; j < OutputLayer ; j++ ){
                ErrorTemp += ErrorOutputValues[j] * Hidden_Output_weights[i][j];
            }
            ErrorHiddenValues[i] = HiddenOutputValues[i] * ( 1 - HiddenOutputValues[i]) * ErrorTemp;
            System.out.println("神經元" + (i + InputLayer + 1) + "的誤差:");
            System.out.println(ErrorHiddenValues[i]);
        }

        System.out.println("*********************************");
    }

    /**
     * Step 5: compute the adjusted weights and biases.
     * w' = w + eta * delta * input ;  theta' = theta + eta * delta.
     * NOTE: results are stored in the Adjustment_* arrays only — they are NOT
     * written back into the live weights, since this program performs exactly
     * one iteration (Judge() is unimplemented).
     *
     * @param TrainingSamples the same input vector that was fed to Forward()
     */
    void Adjustment(double[] TrainingSamples){

        // (1) adjusted hidden->output weights
        System.out.println("調整隱含層到輸出層的連接權重:");
        for(int i = 0 ; i < HiddenLayer ; i++ ){
            for(int j = 0 ; j < OutputLayer ; j++ ){
                Adjustment_Hidden_Output_weights[i][j] =
                        Hidden_Output_weights[i][j] + LearningRate * ErrorOutputValues[j] * HiddenOutputValues[i];
                System.out.println(Adjustment_Hidden_Output_weights[i][j]);
            }
        }

        // (2) adjusted input->hidden weights
        System.out.println("調整輸入層到隱含層的權重值:");
        for(int i = 0 ; i < InputLayer ; i++ ){
            for(int j = 0 ; j < HiddenLayer; j++ ){
                Adjustment_Input_Hidden_weights[i][j] =
                        Input_Hidden_weights[i][j] + LearningRate * ErrorHiddenValues[j] * TrainingSamples[i];
                System.out.println(Adjustment_Input_Hidden_weights[i][j]);
            }
        }

        // (3) adjusted output-layer biases
        System.out.println("調整輸出層的偏置:");
        for(int i = 0; i < OutputLayer; i++ ){
            Adjustment_Output_polarization[i] = Output_polarization[i] + LearningRate * ErrorOutputValues[i];
            System.out.println(Adjustment_Output_polarization[i]);
        }

        // (4) adjusted hidden-layer biases
        System.out.println("調整隱含層的偏置:");
        for(int i = HiddenLayer - 1 ; i >= 0; i-- ){
            Adjustment_Hidden_polarization[i] = Hidden_polarization[i] + LearningRate * ErrorHiddenValues[i];
            System.out.println(Adjustment_Hidden_polarization[i]);
        }
    }

    /**
     * Step 6: stopping criterion — intentionally unimplemented.
     * The book: for each sample, if the output error is within tolerance or the
     * iteration count reaches a threshold, move to the next sample; otherwise
     * increment the counter and retrain on the current sample.
     */
    void Judge(){

    }

    public static void main(String[] args) {
        Bp bpnn = new Bp();
        bpnn.initialize();
        double[] TrainData = {1, 0, 1};
        bpnn.Forward(TrainData);
        bpnn.BackWard();
        // If the error is not within tolerance, compute the adjusted parameters.
        bpnn.Adjustment(TrainData);
    }
}

運行截圖:

運行截圖

注意點:
這個code實現的是一次計算,由於判斷函數沒有去實現。書中對判斷結束描述如下:對於每個樣本,如果最終的輸出誤差小於可接受的範圍或者迭代次數t達到了一定的閾值,則選取下一個樣本,轉到步驟二繼續執行;否則,迭代次數加1,然後向步驟2繼續使用當前樣本訓練。

參考資料:
(1)woodbean – BP神經網絡實現(Java代碼):http://blog.csdn.net/woodbean/article/details/7175378
(2)ACdreamers – BP神經網絡:http://blog.csdn.net/acdreamers/article/details/44657439
(3)https://zhidao.baidu.com/question/320522363.html
(4)http://www.cnblogs.com/hesi/p/7218602.html
(5)刀客123 – BP神經網絡的數據分類 : http://blog.csdn.net/dingyahui123/article/details/72809153?locationNum=1&fps=1
(6)zhc0822 – BP神經網絡的Java實現 : http://fantasticinblur.iteye.com/blog/1465497

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章