/*!@file Gist/FFN.C Feed-forward network for training arbitrary mappings */
// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Created by Chris Ackerman
// Primary maintainer for this file: Christian Siagian <siagian@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Gist/FFN.C $
// $Id: FFN.C 13712 2010-07-28 21:00:40Z itti $
//

// ######################################################################
/*! This implements a simple feedforward network trained with backpropagation.
  It supports 2- and 3-weight-layer networks through separate function calls.
  NOTE: please let the class create the initial weight files
        (use the init function that does not take weight-file names).
        Also, the user should add an extra input for the bias (set it to 1.0). */

#include "Gist/FFN.H"
#include "Image/MatrixOps.H"
#include "Image/CutPaste.H"
#include "Util/log.H"

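// A minimal usage sketch for the 2-weight-layer case (not part of the
// original file; the layer sizes, data values and file names below are
// hypothetical, and the training data would normally come from the caller):
//
//   FeedForwardNetwork ffn;
//   int inunits = 10, hidunits = 5, outunits = 2;
//   ffn.init(inunits, hidunits, outunits, 0.1, 0.0);  // random initial weights
//
//   Image<double> in(1, inunits+1, ZEROS);            // one column vector
//   in.setVal(0, inunits, 1.0);                       // extra bias input at 1.0
//   // ... fill in.setVal(0, i, value) for i < inunits with a training sample ...
//
//   Image<double> target(1, outunits, ZEROS);
//   // ... set the desired outputs ...
//
//   Image<double> out = ffn.run(in);                  // forward pass
//   ffn.backprop(target);                             // one gradient step
//   ffn.write("wh.dat", "wo.dat");                    // save the learned weights
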
// ######################################################################
// ######################################################################
// Feed Forward Network member definitions:
// ######################################################################
// ######################################################################

// ######################################################################
FeedForwardNetwork::FeedForwardNetwork()
{ }

// ######################################################################
FeedForwardNetwork::~FeedForwardNetwork()
{ }

// ######################################################################
// sigmoid function
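// computes f(x) = 1 / (1 + exp(-x)) element-wise; arguments above 30 map
// to 1.0 and below -30 to 0.0, where the sigmoid is effectively saturated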
void FeedForwardNetwork::inPlaceSigmoid(Image<double>& dst)
{
  Image<double>::iterator aptr = dst.beginw(), stop = dst.endw();

  while (aptr != stop) {
    double val;
    if(*aptr >  30.0) val = 1.0;
    else if(*aptr < -30.0) val = 0.0;
    else val =  1.0 / (1.0 + exp(-1.0 * (*aptr)));
    *aptr++ = val;
  }
}

// ######################################################################
// 2-weight-layer network initialization
void FeedForwardNetwork::init
(int inunits, int hidunits, int outunits, double lrate, double mrate)
{
  std::string fname("none");
  init(fname, fname, inunits, hidunits, outunits, lrate, mrate);
}

00088 // ######################################################################
00089 // 2-weight-layer network initialization
00090 void FeedForwardNetwork::init(std::string wh_file, std::string wo_file,
00091                               int inunits, int hidunits, int outunits,
00092                               double lrate, double mrate)
00093 {
00094   // two layer network flag
00095   itsNumLayer = 2;
00096 
00097   // allocate input, hidden, output layer nodes
00098   itsInputLayerPotential.resize(1, inunits+1);
00099   itsHiddenLayerPotential.resize(1, hidunits+1);
00100   itsOutputLayerPotential.resize(1, outunits);
00101 
00102   // allocating input layer -> hidden layer weights
00103   // an extra input for inserting bias constant
00104   itsHiddenLayerWeight.resize(inunits+1, hidunits);
00105 
00106   // allocating hidden layer -> output layer weights
00107   itsOutputLayerWeight.resize(hidunits+1, outunits);
00108 
00109   // momentum not inserted as it did not help in simulation
00110   // and it costs time and memory
00111   //itsHiddenLayerMomentum.resize(inunits+1, hidunits);
00112   //itsOutputLayerMomentum.resize(hidunits+1, outunits);
00113 
00114   // initialize the bias nodes, they remain at 1.0
00115   itsInputLayerPotential.setVal(0, inunits, 1.0);
00116   itsHiddenLayerPotential.setVal(0, hidunits, 1.0);
00117 
00118   // allocate errors storage in the 3 layers
00119   // for error back propagations
00120   itsError.resize(1, outunits);
00121   itsHiddenLayerDelta.resize(1, hidunits);
00122   itsOutputLayerDelta.resize(1, outunits);
00123 
00124   // laerning and momentum rates
00125   itsLearningRate = lrate;
00126   itsMomentumRate = mrate;
00127 
00128   // initialize the input layer -> hidden layer weights
00129   FILE *fp; Image<double>::iterator aptr;
00130   if((fp = fopen(wh_file.c_str(),"rb")) == NULL)
00131     {
00132       LINFO("initializing wh with random weights");
00133       srand((unsigned)time(0));
00134       aptr =  itsHiddenLayerWeight.beginw();
00135       for(int i = 0; i < inunits+1; i++)
00136         for(int j = 0; j < hidunits; j++)
00137           *aptr++ = (-FFN_RW_RANGE/2.0) +
00138             (rand()/(RAND_MAX + 1.0) * FFN_RW_RANGE);
00139     }
00140     else
00141     {
00142       LINFO("reading whfile");
00143       Image<double> temp(hidunits, inunits+1, NO_INIT);
00144       double val; aptr =  temp.beginw();
00145       for(int i = 0; i < inunits+1; i++)
00146         for(int j = 0; j < hidunits; j++)
00147           { if (fread(&val, sizeof(double), 1, fp) != 1) LERROR("fread error"); *aptr++ = val; }
00148       itsHiddenLayerWeight = transpose(temp);
00149       fclose(fp);
00150     }
00151 
00152   // initialize the hidden layer -> output layer weights
00153   if((fp=fopen(wo_file.c_str(),"rb")) == NULL)
00154     {
00155       LINFO("initializing wo with random weights");
00156       srand((unsigned)time(0));
00157       aptr =  itsOutputLayerWeight.beginw();
00158       for(int i = 0; i < hidunits+1; i++)
00159         for(int j = 0; j < outunits; j++)
00160           *aptr++ = (-FFN_RW_RANGE/2.0) +
00161             (rand()/(RAND_MAX + 1.0) * FFN_RW_RANGE);
00162     }
00163   else
00164     {
00165       LINFO("reading wofile");
00166       Image<double> temp(outunits, hidunits+1, NO_INIT);
00167       double val; aptr =  temp.beginw();
00168       for(int i = 0; i < hidunits+1; i++)
00169         for(int j = 0; j < outunits; j++)
00170           { if (fread(&val, sizeof(double), 1, fp) != 1) LERROR("fread error"); *aptr++ = val; }
00171       itsOutputLayerWeight = transpose(temp);
00172       fclose(fp);
00173     }
00174 }
00175 
// ######################################################################
// 2-weight-layer network initialization
void FeedForwardNetwork::init(Image<double> wh, Image<double> wo,
                              double lrate, double mrate)
{
  // two layer network flag
  itsNumLayer = 2;

  ASSERT(wh.getHeight() == wo.getWidth() - 1);
  int inunits  = wh.getWidth() - 1;
  int hidunits = wh.getHeight();
  int outunits = wo.getHeight();

  // allocate input, hidden, output layer nodes
  itsInputLayerPotential.resize(1, inunits+1);
  itsHiddenLayerPotential.resize(1, hidunits+1);
  itsOutputLayerPotential.resize(1, outunits);

  // allocating input layer -> hidden layer weights
  // an extra input for inserting the bias constant
  itsHiddenLayerWeight.resize(inunits+1, hidunits);

  // allocating hidden layer -> output layer weights
  itsOutputLayerWeight.resize(hidunits+1, outunits);

  // momentum is not used since it did not help in simulations
  // and it costs time and memory
  //itsHiddenLayerMomentum.resize(inunits+1, hidunits);
  //itsOutputLayerMomentum.resize(hidunits+1, outunits);

  // initialize the bias nodes; they remain at 1.0
  itsInputLayerPotential.setVal(0, inunits, 1.0);
  itsHiddenLayerPotential.setVal(0, hidunits, 1.0);

  // allocate error storage in the 3 layers
  // for error back-propagation
  itsError.resize(1, outunits);
  itsHiddenLayerDelta.resize(1, hidunits);
  itsOutputLayerDelta.resize(1, outunits);

  // learning and momentum rates
  itsLearningRate = lrate;
  itsMomentumRate = mrate;

  // assign the input layer -> hidden layer weights
  itsHiddenLayerWeight = wh;

  // assign the hidden layer -> output layer weights
  itsOutputLayerWeight = wo;
}

// ######################################################################
// 3-weight-layer network initialization
void FeedForwardNetwork::init3L
( int inunits, int hid1units, int hid2units,
  int outunits, double lrate, double mrate)
{
  std::string fname("none");
  init3L(fname, fname, fname, inunits, hid1units, hid2units, outunits,
         lrate, mrate);
}

// ######################################################################
// 3-weight-layer network initialization
void FeedForwardNetwork::init3L
( std::string wh_file, std::string wh2_file, std::string wo_file,
  int inunits, int hidunits, int hid2units,
  int outunits, double lrate, double mrate)
{
  // flag for three layer network
  itsNumLayer = 3;

  // allocate input, hidden1, hidden2, output layer nodes
  itsInputLayerPotential.resize  (1, inunits+1);
  itsHiddenLayerPotential.resize (1, hidunits+1);
  itsHiddenLayer2Potential.resize(1, hid2units+1);
  itsOutputLayerPotential.resize (1, outunits);

  // allocating input layer -> hidden layer1 weights
  // an extra input for inserting the bias constant
  itsHiddenLayerWeight.resize(inunits+1, hidunits);

  // allocating hidden layer1 -> hidden layer2 weights
  itsHiddenLayer2Weight.resize(hidunits+1, hid2units);

  // allocating hidden layer2 -> output layer weights
  itsOutputLayerWeight.resize(hid2units+1, outunits);

  // initialize the bias nodes; they remain at 1.0
  itsInputLayerPotential.setVal(0, inunits, 1.0);
  itsHiddenLayerPotential.setVal(0, hidunits, 1.0);
  itsHiddenLayer2Potential.setVal(0, hid2units, 1.0);

  // allocate error storage in the 4 layers
  // for error back-propagation
  itsError.resize(1, outunits);
  itsHiddenLayerDelta.resize(1, hidunits);
  itsHiddenLayer2Delta.resize(1, hid2units);
  itsOutputLayerDelta.resize(1, outunits);

  // learning and momentum rates
  itsLearningRate = lrate;
  itsMomentumRate = mrate;

  // initialize the input layer -> hidden layer1 weights
  FILE *fp; Image<double>::iterator aptr;
  if((fp = fopen(wh_file.c_str(),"rb")) == NULL)
    {
      LINFO("initializing wh with random weights");
      srand((unsigned)time(0));
      aptr =  itsHiddenLayerWeight.beginw();
      for(int i = 0; i < inunits+1; i++)
        for(int j = 0; j < hidunits; j++)
          *aptr++ = (-FFN_RW_RANGE/2.0) +
            (rand()/(RAND_MAX + 1.0) * FFN_RW_RANGE);
    }
  else
    {
      LINFO("reading whfile");
      Image<double> temp(hidunits, inunits+1, NO_INIT);
      double val; aptr =  temp.beginw();
      for(int i = 0; i < inunits+1; i++)
        for(int j = 0; j < hidunits; j++)
          { if (fread(&val, sizeof(double), 1, fp) != 1) LERROR("fread error"); *aptr++ = val; }
      itsHiddenLayerWeight = transpose(temp);
      fclose(fp);
    }

  // initialize the hidden layer1 -> hidden layer2 weights
  if((fp = fopen(wh2_file.c_str(),"rb")) == NULL)
    {
      LINFO("initializing wh2 with random weights");
      srand((unsigned)time(0));
      aptr =  itsHiddenLayer2Weight.beginw();
      for(int i = 0; i < hidunits+1; i++)
        for(int j = 0; j < hid2units; j++)
          *aptr++ = (-FFN_RW_RANGE/2.0) +
            (rand()/(RAND_MAX + 1.0) * FFN_RW_RANGE);
    }
  else
    {
      LINFO("reading wh2file");
      Image<double> temp(hid2units, hidunits+1, NO_INIT);
      double val; aptr =  temp.beginw();
      for(int i = 0; i < hidunits+1; i++)
        for(int j = 0; j < hid2units; j++)
          { if (fread(&val, sizeof(double), 1, fp) != 1) LERROR("fread error"); *aptr++ = val; }
      itsHiddenLayer2Weight = transpose(temp);
      fclose(fp);
    }

  // initialize the hidden layer2 -> output layer weights
  if((fp = fopen(wo_file.c_str(),"rb")) == NULL)
    {
      LINFO("initializing wo with random weights");
      srand((unsigned)time(0));
      aptr =  itsOutputLayerWeight.beginw();
      for(int i = 0; i < hid2units+1; i++)
        for(int j = 0; j < outunits; j++)
          *aptr++ = (-FFN_RW_RANGE/2.0) +
            (rand()/(RAND_MAX + 1.0) * FFN_RW_RANGE);
    }
  else
    {
      LINFO("reading wofile");
      Image<double> temp(outunits, hid2units+1, NO_INIT);
      double val; aptr =  temp.beginw();
      for(int i = 0; i < hid2units+1; i++)
        for(int j = 0; j < outunits; j++)
          { if (fread(&val, sizeof(double), 1, fp) != 1) LERROR("fread error"); *aptr++ = val; }
      itsOutputLayerWeight = transpose(temp);
      fclose(fp);
    }
}

// ######################################################################
// 3-weight-layer network initialization
void FeedForwardNetwork::init3L
( Image<double> wh, Image<double> wh2, Image<double> wo,
  double lrate, double mrate)
{
  // flag for three layer network
  itsNumLayer = 3;

  ASSERT(wh.getHeight() == wh2.getWidth() - 1);
  ASSERT(wh2.getHeight() == wo.getWidth() - 1);
  int inunits   = wh.getWidth() - 1;
  int hidunits  = wh.getHeight();
  int hid2units = wh2.getHeight();
  int outunits  = wo.getHeight();

  // allocate input, hidden1, hidden2, output layer nodes
  itsInputLayerPotential.resize  (1, inunits+1);
  itsHiddenLayerPotential.resize (1, hidunits+1);
  itsHiddenLayer2Potential.resize(1, hid2units+1);
  itsOutputLayerPotential.resize (1, outunits);

  // allocating input layer -> hidden layer1 weights
  // an extra input for inserting the bias constant
  itsHiddenLayerWeight.resize(inunits+1, hidunits);

  // allocating hidden layer1 -> hidden layer2 weights
  itsHiddenLayer2Weight.resize(hidunits+1, hid2units);

  // allocating hidden layer2 -> output layer weights
  itsOutputLayerWeight.resize(hid2units+1, outunits);

  // initialize the bias nodes; they remain at 1.0
  itsInputLayerPotential.setVal(0, inunits, 1.0);
  itsHiddenLayerPotential.setVal(0, hidunits, 1.0);
  itsHiddenLayer2Potential.setVal(0, hid2units, 1.0);

  // allocate error storage in the 4 layers
  // for error back-propagation
  itsError.resize(1, outunits);
  itsHiddenLayerDelta.resize(1, hidunits);
  itsHiddenLayer2Delta.resize(1, hid2units);
  itsOutputLayerDelta.resize(1, outunits);

  // learning and momentum rates
  itsLearningRate = lrate;
  itsMomentumRate = mrate;

  // assign the input layer -> hidden layer weights
  itsHiddenLayerWeight = wh;

  // assign the hidden layer -> hidden layer 2 weights
  itsHiddenLayer2Weight = wh2;

  // assign the hidden layer 2 -> output layer weights
  itsOutputLayerWeight = wo;
}

// ######################################################################
// run the 2-weight-layer network
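// forward pass: h = sigmoid(Wh * x), o = sigmoid(Wo * h);
// the bias entries of x and h are left at 1.0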
Image<double> FeedForwardNetwork::run(Image<double> input)
{
  // set the input layer potential
  Image<double>::iterator aptr = input.beginw(), stop = input.endw();
  Image<double>::iterator bptr = itsInputLayerPotential.beginw();
  while (aptr != stop) *bptr++ = *aptr++;

  // compute hidden layer (bias stays at 1.0)
  Image<double> thlp =
    matrixMult(itsHiddenLayerWeight, itsInputLayerPotential);
  inPlaceSigmoid(thlp);
  aptr = thlp.beginw(), stop = thlp.endw();
  bptr = itsHiddenLayerPotential.beginw();
  while (aptr != stop) *bptr++ = *aptr++;

  // compute output layer
  itsOutputLayerPotential =
    matrixMult(itsOutputLayerWeight, itsHiddenLayerPotential);
  inPlaceSigmoid(itsOutputLayerPotential);

  return itsOutputLayerPotential;
}

// ######################################################################
// run the 3-weight-layer network
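// forward pass with two hidden layers:
// h1 = sigmoid(Wh * x), h2 = sigmoid(Wh2 * h1), o = sigmoid(Wo * h2)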
Image<double> FeedForwardNetwork::run3L(Image<double> input)
{
  // set the input layer potential
  Image<double>::iterator aptr = input.beginw(), stop = input.endw();
  Image<double>::iterator bptr = itsInputLayerPotential.beginw();
  while (aptr != stop) *bptr++ = *aptr++;

  // compute hidden layer (bias stays at 1.0)
  Image<double> thlp =
    matrixMult(itsHiddenLayerWeight, itsInputLayerPotential);
  inPlaceSigmoid(thlp);
  aptr = thlp.beginw(), stop = thlp.endw();
  bptr = itsHiddenLayerPotential.beginw();
  while (aptr != stop) *bptr++ = *aptr++;

  // compute hidden2 layer (bias stays at 1.0)
  Image<double> thl2p =
    matrixMult(itsHiddenLayer2Weight, itsHiddenLayerPotential);
  inPlaceSigmoid(thl2p);
  aptr = thl2p.beginw(), stop = thl2p.endw();
  bptr = itsHiddenLayer2Potential.beginw();
  while (aptr != stop) *bptr++ = *aptr++;

  // compute output layer
  itsOutputLayerPotential =
    matrixMult(itsOutputLayerWeight, itsHiddenLayer2Potential);
  inPlaceSigmoid(itsOutputLayerPotential);

  return itsOutputLayerPotential;
}

// ######################################################################
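// one step of gradient-descent backpropagation for the 2-weight-layer
// network, with sigmoid units and a squared-error cost:
//   output delta:   d_o = (target - o) .* o .* (1 - o)
//   hidden delta:   d_h = (Wo^T * d_o) .* h .* (1 - h)   (bias row dropped)
//   weight updates: Wo += lrate * d_o * h^T,  Wh += lrate * d_h * x^T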
void FeedForwardNetwork::backprop(Image<double> target)
{
  // propagate error from the output to hidden layer
  itsError = target - itsOutputLayerPotential;
  Image<double>
    onesO(1,itsOutputLayerPotential.getSize(), ZEROS); onesO += 1.0;
  itsOutputLayerDelta = itsError * itsOutputLayerPotential *
    (onesO - itsOutputLayerPotential);

  // propagate error from hidden layer to input layer
  Image<double>
    onesH(1,itsHiddenLayerPotential.getSize(), ZEROS); onesH += 1.0;
  Image<double> tempDh =
    matrixMult(transpose(itsOutputLayerWeight), itsOutputLayerDelta) *
    itsHiddenLayerPotential * (onesH - itsHiddenLayerPotential);
  itsHiddenLayerDelta =
    crop(tempDh, Point2D<int>(0,0), itsHiddenLayerDelta.getDims());

  // update weights in hidden -> output layer
  itsOutputLayerWeight +=
    (matrixMult(itsOutputLayerDelta, transpose(itsHiddenLayerPotential))
     * itsLearningRate);

  // update weights in input layer -> hidden layer
  itsHiddenLayerWeight +=
    matrixMult(itsHiddenLayerDelta, transpose(itsInputLayerPotential))
    * itsLearningRate;
}

// ######################################################################
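// same delta-rule updates as backprop(), propagated through both hidden layers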
void FeedForwardNetwork::backprop3L(Image<double> target)
{
  // propagate error from the output to hidden layer 2
  itsError = target - itsOutputLayerPotential;
  Image<double>
    onesO(1,itsOutputLayerPotential.getSize(), ZEROS); onesO += 1.0;
  itsOutputLayerDelta = itsError * itsOutputLayerPotential *
    (onesO - itsOutputLayerPotential);

  // propagate error from hidden layer 2 to hidden layer
  Image<double>
    onesH2(1,itsHiddenLayer2Potential.getSize(), ZEROS); onesH2 += 1.0;
  Image<double> tempDh2 =
    matrixMult(transpose(itsOutputLayerWeight), itsOutputLayerDelta) *
    itsHiddenLayer2Potential * (onesH2 - itsHiddenLayer2Potential);
  itsHiddenLayer2Delta =
    crop(tempDh2, Point2D<int>(0,0), itsHiddenLayer2Delta.getDims());

  // propagate error from hidden layer to input layer
  Image<double>
    onesH(1,itsHiddenLayerPotential.getSize(), ZEROS); onesH += 1.0;
  Image<double> tempDh =
    matrixMult(transpose(itsHiddenLayer2Weight), itsHiddenLayer2Delta) *
    itsHiddenLayerPotential * (onesH - itsHiddenLayerPotential);
  itsHiddenLayerDelta =
    crop(tempDh, Point2D<int>(0,0), itsHiddenLayerDelta.getDims());

  // update weights in hidden layer 2 -> output layer
  itsOutputLayerWeight +=
    (matrixMult(itsOutputLayerDelta, transpose(itsHiddenLayer2Potential))
     * itsLearningRate);

  // update weights in hidden layer -> hidden layer 2
  itsHiddenLayer2Weight +=
    matrixMult(itsHiddenLayer2Delta, transpose(itsHiddenLayerPotential))
    * itsLearningRate;

  // update weights in input layer -> hidden layer
  itsHiddenLayerWeight +=
    matrixMult(itsHiddenLayerDelta, transpose(itsInputLayerPotential))
    * itsLearningRate;
}

// ######################################################################
// store the current weights of the 2-weight-layer network to the given files
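// the weights are dumped as raw doubles, in the transposed order that
// init() expects when reading the files back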
void FeedForwardNetwork::write(std::string wh_file, std::string wo_file)
{
  FILE *fp;

  // store the hidden layer weights
  if((fp = fopen(wh_file.c_str(),"wb")) == NULL)
    LFATAL("can't open wh1");
  Image<double> temp = transpose(itsHiddenLayerWeight);
  Image<double>::iterator aptr = temp.beginw();
  Image<double>::iterator stop = temp.endw();
  while (aptr != stop)
    { double val = *aptr++; fwrite(&val, sizeof(double), 1, fp); }
  fclose(fp);

  // store the output layer weights
  if((fp = fopen(wo_file.c_str(),"wb")) == NULL)
    LFATAL("can't open wo");
  temp = transpose(itsOutputLayerWeight);
  aptr = temp.beginw(); stop = temp.endw();
  while (aptr != stop)
    { double val = *aptr++; fwrite(&val, sizeof(double), 1, fp); }
  fclose(fp);
  //fwrite(temp.getArrayPtr(), sizeof(double), temp.getSize(), fp);
}

// ######################################################################
// store the current weights of the 3-weight-layer network
// to the given files
void FeedForwardNetwork::write3L
(std::string wh_file, std::string wh2_file, std::string wo_file)
{
  FILE *fp;

  // store the hidden layer1 weights
  if((fp = fopen(wh_file.c_str(),"wb")) == NULL)
    LFATAL("can't open wh1");
  Image<double> temp = transpose(itsHiddenLayerWeight);
  Image<double>::iterator aptr = temp.beginw();
  Image<double>::iterator stop = temp.endw();
  while (aptr != stop)
    { double val = *aptr++; fwrite(&val, sizeof(double), 1, fp); }
  fclose(fp);

  // store the hidden layer2 weights
  if((fp = fopen(wh2_file.c_str(),"wb")) == NULL)
    LFATAL("can't open wh2");
  temp = transpose(itsHiddenLayer2Weight);
  aptr = temp.beginw(); stop = temp.endw();
  while (aptr != stop)
    { double val = *aptr++; fwrite(&val, sizeof(double), 1, fp); }
  fclose(fp);

  // store the output layer weights
  if((fp = fopen(wo_file.c_str(),"wb")) == NULL)
    LFATAL("can't open wo");
  temp = transpose(itsOutputLayerWeight);
  aptr = temp.beginw(); stop = temp.endw();
  while (aptr != stop)
    { double val = *aptr++; fwrite(&val, sizeof(double), 1, fp); }
  fclose(fp);
}

// ######################################################################
void FeedForwardNetwork::setLearningRate(double newLR)
{
  itsLearningRate = newLR;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */