Neural Network Backpropagation Algorithm


In this post I will explain the backpropagation algorithm. Neural network backpropagation takes the error produced by the sigmoid function at the output of the network and propagates it back through the layers so that the weights can be learned again. The algorithm relies on the derivative of each layer's output.

Derivative formula:

x' = x * (1 - x)

where x' is the result of the derivative and x is the output of the neural network layer.
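
As a quick illustration, here is a minimal sketch (my own, not part of the program below) of how this derivative feeds into backpropagation: the output-layer error term is x' multiplied by the difference between the target and the actual output. The class name SigmoidDerivativeDemo and the method sigmoidDerivative are only illustrative.

public class SigmoidDerivativeDemo {

    // Logistic sigmoid activation: 1 / (1 + e^(-x))
    static double sigmoid(double x) {
        return 1.0 / (1.0 + Math.exp(-x));
    }

    // Derivative expressed through the sigmoid output: x' = x * (1 - x)
    static double sigmoidDerivative(double output) {
        return output * (1 - output);
    }

    public static void main(String[] args) {
        double target = 1.0;            // desired output for this example
        double out = sigmoid(0.5);      // forward pass of a single neuron
        // Output-layer error term: x' * (target - x), the same expression the
        // full program writes inline as X[2][k] * (1 - X[2][k]) * (target[s] - X[2][k])
        double delta = sigmoidDerivative(out) * (target - out);
        System.out.println("output = " + out + ", delta = " + delta);
    }
}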

Below is the neural network backpropagation source code that I wrote using the Java programming language.

package nnmultiperceptron;

import java.util.Random;

/**
 *
 * @author faisalkwok
 */
public class NNmultiPerceptron {

    /**
     * @param args the command line arguments
     */
    // Number of training epochs
    static int epoch = 1000;

    // Training data: each row is one input sample
    static double datalear[][] = {
        {0, 0, 0, 0, 1},
        {0, 2, 1, 1, 1},
        {1, 0, 0, 1, 0},
        {1, 2, 2, 2, 2},
        {2, 2, 2, 2, 2}};

    // Number of training samples
    static int jumdata = datalear.length;

    // Number of input-layer neurons (features per sample)
    static int jumInput = datalear[0].length;

    // Number of hidden-layer neurons
    static int jumlayer = 10;

    // Number of output-layer neurons
    static int jumoutput = 1;

    // Target output for each training sample
    static double target[] = {0, 0, 0, 1, 1};

    // Bias values for the hidden and output layers
    static double bias[] = {0.1, 0.1};

    // Learning rate
    static double miu = 0.1;

    // X[0] = input activations, X[1] = hidden activations, X[2] = output activations
    static double[][] X = new double[100][100];
    // Weight updates (delta w)
    static double[][] dw = new double[100][100];
    // Error terms (deltas) per layer
    static double D[][] = new double[100][100];

    public static void main(String[] args) {

        double error;
        double w1[][] = new double[100][100];
        double w2[][] = new double[100][100];
        Random r = new Random();

        // Initialize the input-to-hidden weights randomly in [-1, 1]
        for (int k = 0; k < jumlayer; k++) {
            for (int l = 0; l < jumInput + 1; l++) {
                w1[k][l] = -1.0 + (1.0 - (-1.0)) * r.nextDouble();
            }
        }
        // Initialize the hidden-to-output weights randomly in [-1, 1]
        for (int k = 0; k < jumoutput; k++) {
            for (int l = 0; l < jumlayer + 1; l++) {
                w2[k][l] = -1.0 + (1.0 - (-1.0)) * r.nextDouble();
            }
        }

        for (int ep = 0; ep < epoch; ep++) {
            System.out.println("Epoch " + (ep + 1));
            double tot = 0;
            for (int s = 0; s < jumdata; s++) {
                // Copy the current sample into the input layer and append the bias term
                for (int in = 0; in < jumInput; in++) {
                    X[0][in] = datalear[s][in];
                }
                X[0][jumInput] = bias[0];
                // Forward pass: hidden layer
                for (int m = 0; m < jumlayer; m++) {
                    double sum = 0;
                    for (int i = 0; i < jumInput; i++) {
                        sum = sum + X[0][i] * w1[m][i];
                    }
                    sum = sum + X[0][jumInput] * w1[m][jumInput];
                    // Sigmoid activation of the hidden layer
                    X[1][m] = sigmoid(sum);
                }
                X[1][jumlayer] = bias[1];
                // Forward pass: output layer
                for (int a = 0; a < jumoutput; a++) {
                    double sum = 0;
                    for (int b = 0; b < jumlayer; b++) {
                        sum = sum + X[1][b] * w2[a][b];
                    }
                    sum = sum + X[1][jumlayer] * w2[a][jumlayer];
                    // Sigmoid activation of the output layer
                    X[2][a] = sigmoid(sum);
                }
               
                // Accumulate the absolute error of this sample
                double selisih = 0;
                for (int sse = 0; sse < jumoutput; sse++) {
                    selisih = Math.abs(target[s] - X[2][sse]);
                    tot += selisih;
                }
             
                // Backpropagation: output-layer error term,
                // delta = x' * (target - x), with x' = x * (1 - x)
                for (int k = 0; k < jumoutput; k++) {
                    D[3][k] = (X[2][k] * (1 - X[2][k])) * (target[s] - X[2][k]);

                    // Hidden-layer error terms, propagated back through w2
                    for (int l = 0; l < jumlayer; l++) {
                        D[2][l] = X[1][l] * (1 - X[1][l]) * w2[k][l] * D[3][k];
                    }

                    // Update the hidden-to-output weights (including the bias weight)
                    for (int t = 0; t < jumlayer + 1; t++) {
                        dw[2][t] = miu * X[1][t] * D[3][k];
                        w2[k][t] = w2[k][t] + dw[2][t];
                    }
                }
                
                // Update the input-to-hidden weights (including the bias weight)
                for (int k = 0; k < jumlayer; k++) {
                    for (int l = 0; l < jumInput + 1; l++) {
                        dw[k][l] = miu * X[0][l] * D[2][k];
                        w1[k][l] = w1[k][l] + dw[k][l];
                    }
                }
            }
            // Print the error for this epoch (square root of the summed absolute error, scaled)
            error = Math.sqrt(tot) * 1000;
            System.out.println(error);
        }
    }

    /** Logistic sigmoid activation function: 1 / (1 + e^(-x)). */
    public static double sigmoid(double x) {
        return 1.0 / (1.0 + Math.exp(-x));
    }
}

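The program trains on the five samples hard coded in datalear against the targets in target. Every epoch it prints the epoch number followed by an error value; if training behaves as expected, that value should trend downward over the 1000 epochs. The hidden-layer size (jumlayer), learning rate (miu), and number of epochs are all defined at the top of the class, so they are easy to change and experiment with.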