Logistic function for MATLAB

PRITESH GARG on 5 Aug 2015
I have the following code, written in C, for an artificial neural network, and I need the same thing written in MATLAB. I understand the rest of it, but I need clarity on the segment containing the logistic function. How do I impose the conditions specified there? I am using MATLAB's logsig function for this, but I have not applied any conditions like (if(x > 100.0) x = 1.0; else if (x < -100.0) x = 0.0; else x = 1.0/(1.0+exp(-x)); return x;). Please guide.
//Analysis Type - Regression
#include <stdio.h>
#include <conio.h>
#include <math.h>
#include <stdlib.h>
double input_hidden_weights[10][11]= { {-1.08981181165092e+000, -8.45560986618201e-001, -1.15508735823059e+000, -1.53961757221652e+000, 1.95110553951748e+000, -6.15347316032675e-001, 2.34981067790958e-001, 2.11591756930854e+000, -2.01297363020452e+000, 1.77311648056644e+000, -3.36121802487997e-002 }, {7.66427717846725e-002, -2.02751489179458e-001, 2.54934123008781e-001, -2.39451491109902e-001, -1.96768687215035e-001, 1.57765919556478e-001, -5.87478301142373e-001, -9.48354449009283e-002, -1.63562336110873e-001, 8.89949736043556e-002, 1.54980222184378e-001 }, {6.49506614263822e-002, 1.29375882981202e-002, 9.51307001473201e-002, 6.29930719603194e-002, -2.40433384903935e-002, -1.81101036169428e-001, -5.98997346971256e-001, 2.86236037910268e-001, 1.88528006858646e-001, 4.07835327449595e-001, 1.53198400889580e-002 }, {3.43383738312347e-001, 3.25208757138932e-001, 3.76423972070529e-001, 2.48028271117508e-001, -2.83182187359758e+000, 8.33270614893402e-001, -2.23299633432068e+000, 7.69604342015733e-001, 1.88022442800940e+000, -2.61556150633244e-001, -1.08354609581291e+000 }, {1.37601000033603e-001, -4.41927634620918e-001, 1.27608747217935e-001, -3.06264312202355e-001, -1.41097097602089e-001, 5.91413509333915e-002, -6.22865963139176e-001, -2.26871735230450e-001, -2.67592629441600e-001, 1.08563205015783e-001, 4.68410336031276e-001 }, {-2.39869041697278e+000, -8.43849099046725e-001, -1.61024560303062e+000, -1.42318654603153e+000, 3.13396949839847e+000, -6.12995547719934e-001, 1.45678724289065e+000, -5.58768415542200e-001, 2.38935240343021e+000, 6.68172090343994e-001, -5.74479377270936e-001 }, {3.29361303229807e-001, 4.34694656767008e-001, 3.15900384325763e-002, 1.22261295232762e-001, -1.34832321057328e+000, 5.39725350407144e-001, -7.78134976237650e-001, 3.61130762963300e-001, 1.11904082830058e+000, -3.13180152731005e-001, -1.10542024230660e+000 }, {5.79380377669733e-001, 6.76871129832203e-001, -1.67000251248133e-001, 8.76743089068297e-001, -1.62536633095877e+000, -1.31890356361784e-001, -1.14736923193155e+000, 2.11716451633814e+000, -9.96498273311219e-001, 1.18544899655051e+000, -3.82918567097315e-001 }, {-5.18995067017132e-003, -6.28364804283434e-003, 7.41520369935559e-002, 1.52180456799685e-002, -1.39482745020619e-001, 1.38533439200179e-002, -9.75012655116616e-002, 2.92959748527167e-003, -2.28647300593702e-002, -7.16167486080281e-002, -8.37345588016407e-002 }, {-3.25440754201253e-001, 2.69520104584687e-002, 3.01473987470206e-002, -2.72697301978532e-002, -6.89848180310552e-001, 1.41754816248037e-001, -2.57792880258895e-001, 1.81687113108610e-001, 3.80292862477277e-001, -1.78745707137620e-001, -6.46077861307748e-001 } };
double hidden_bias[10]={ 6.89420547033673e-001, 1.97888937317496e-001, -3.16427171988281e-001, 4.13398486702406e-001, 2.02538540907513e-001, 1.31084613185094e-001, 2.20540172349928e-001, -8.67712137156079e-001, -3.99310604284152e-002, -1.80569831186016e-001 };
double hidden_output_wts[1][10]= { {7.93199602141392e-001, -5.04645816842105e-001, -1.19130140962352e-001, 9.06142459839867e-001, -6.68164935942016e-001, -6.80192074991878e-001, 3.07729410106918e-001, -8.98527909164811e-001, 1.28212547799242e-001, 1.01382966845005e-001 } };
double output_bias[1]={ 8.02263165114899e-001 };
double max_input[11]={ 2.35000000000000e+000, 1.11800000000000e+001, 1.25000000000000e+000, 1.16900000000000e+001, 1.31300000000000e+001, 6.54100000000000e+001, 1.29000000000000e+000, 5.52000000000000e+000, 5.40000000000000e+000, 1.29000000000000e+000, 1.60000000000000e-001 };
double min_input[11]={ 1.81000000000000e+000, 8.70000000000000e+000, 7.30000000000000e-001, 8.28000000000000e+000, 9.93000000000000e+000, 6.07100000000000e+001, 0.00000000000000e+000, 0.00000000000000e+000, 0.00000000000000e+000, 0.00000000000000e+000, 0.00000000000000e+000 };
double max_target[1]={ 2.57000000000000e+002 };
double min_target[1]={ 1.98000000000000e+002 };
double input[11]; double hidden[10]; double output[1];
void ScaleInputs(double* input, double minimum, double maximum, int size)
{
    /* Scale raw inputs from [min_input, max_input] to [minimum, maximum]. */
    double delta;
    long i;
    for (i = 0; i < size; i++) {
        delta = (maximum - minimum) / (max_input[i] - min_input[i]);
        input[i] = minimum - delta * min_input[i] + delta * input[i];
    }
}
void UnscaleTargets(double* output, double minimum, double maximum, int size)
{
    /* Map the network output back from [minimum, maximum] to the original target range. */
    double delta;
    long i;
    for (i = 0; i < size; i++) {
        delta = (maximum - minimum) / (max_target[i] - min_target[i]);
        output[i] = (output[i] - minimum + delta * min_target[i]) / delta;
    }
}
double logistic(double x)
{
    /* Logistic (sigmoid) activation, clamped for large-magnitude inputs. */
    if (x > 100.0) x = 1.0;
    else if (x < -100.0) x = 0.0;
    else x = 1.0 / (1.0 + exp(-x));
    return x;
}
void ComputeFeedForwardSignals(double* MAT_INOUT, double* V_IN, double* V_OUT,
                               double* V_BIAS, int size1, int size2, int layer)
{
    /* One layer of the feed-forward pass: V_OUT = activation(MAT_INOUT * V_IN + V_BIAS).
       layer == 0 applies an exponential activation, layer == 1 the logistic. */
    int row, col;
    for (row = 0; row < size2; row++) {
        V_OUT[row] = 0.0;
        for (col = 0; col < size1; col++)
            V_OUT[row] += (*(MAT_INOUT + (row * size1) + col) * V_IN[col]);
        V_OUT[row] += V_BIAS[row];
        if (layer == 0) V_OUT[row] = exp(V_OUT[row]);
        if (layer == 1) V_OUT[row] = logistic(V_OUT[row]);
    }
}
void RunNeuralNet_Regression()
{
    /* 11-10-1 network: inputs -> hidden (exp activation) -> output (logistic). */
    ComputeFeedForwardSignals((double*)input_hidden_weights, input, hidden, hidden_bias, 11, 10, 0);
    ComputeFeedForwardSignals((double*)hidden_output_wts, hidden, output, output_bias, 10, 1, 1);
}
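
Regarding the clamping: MATLAB's logsig(n) computes 1./(1+exp(-n)), the same formula as the else branch of the C function. In double precision this already saturates to 1 for large positive inputs, and for inputs below -100 it returns a value on the order of 1e-44; an overflowing exp simply yields Inf (and hence 0) rather than an error. So the explicit bounds mainly serve to reproduce the C program's output exactly. A minimal sketch of a clamped logistic in MATLAB that mirrors the C code (one possible way to write it, not the only one):

function y = logistic(x)
% Element-wise clamped logistic, mirroring the C logistic() above.
y = zeros(size(x));
y(x > 100) = 1;                       % saturate high
y(x < -100) = 0;                      % saturate low
mid = (x >= -100) & (x <= 100);
y(mid) = 1 ./ (1 + exp(-x(mid)));     % standard logistic elsewhere
end

For inputs in [-100, 100] this matches logsig(x); outside that range the only difference is that the result is forced to exactly 0 or 1.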
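For the rest of the translation, the C feed-forward pass maps directly onto MATLAB matrix arithmetic. A minimal sketch, under the assumption that the weight and bias/min/max arrays above have been entered as MATLAB variables with the same values (input_hidden_weights as 10x11, hidden_bias as 10x1, hidden_output_wts as 1x10, max_input/min_input as 11x1 columns), and that lo and hi stand for the minimum/maximum scaling arguments used by the original caller, which is not shown here:

% x_raw: 11x1 column vector of raw inputs
delta = (hi - lo) ./ (max_input - min_input);       % ScaleInputs
x     = lo - delta .* min_input + delta .* x_raw;
h = exp(input_hidden_weights * x + hidden_bias);    % hidden layer (layer == 0, exp activation)
y = logistic(hidden_output_wts * h + output_bias);  % output layer (layer == 1, logistic)
deltaT = (hi - lo) ./ (max_target - min_target);    % UnscaleTargets
y      = (y - lo + deltaT .* min_target) ./ deltaT;

With output_bias, max_target and min_target as scalars, y comes out as a single prediction in the original target units.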

Answers (0)
