FFN.H

Go to the documentation of this file.
00001 /*!@file Gist/FFN.H Feed Forward Network  for use to train any mapping       */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00005 // University of Southern California (USC) and the iLab at USC.         //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Created by Chris Ackerman
00034 // Primary maintainer for this file: Christian Siagian <siagian@usc.edu>
00035 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Gist/FFN.H $
00036 // $Id: FFN.H 12309 2009-12-18 01:26:50Z beobot $
00037 //
00038 
00039 #ifndef GIST_FFN_H
00040 #define GIST_FFN_H
00041 
00042 #include "Image/Image.H"
00043 
00044 #include <stdio.h>
00045 #include <math.h>
00046 #include <stdlib.h>
00047 #include <time.h>
00048 #include <string>
00049 
00050 #define FFN_RW_RANGE 1.0    // range of initial random weights [-.5 ... .5]
00051 
00052 // ######################################################################
00053 //! A Feed Forward Network
00054 class FeedForwardNetwork
00055 {
00056 
00057 public:
00058 
00059   // ######################################################################
00060   /*! @name Constructors, assignment, and Destructors */
00061   //@{
00062 
00063   //! Constructor
00064   FeedForwardNetwork();
00065 
00066   //! Destructor
00067   ~FeedForwardNetwork();
00068 
00069   //! Initialize a two-weight-layer network with saved weights
00070   //! momentum is unimplemented
00071   void init(std::string wh_file, std::string wo_file,
00072             int inunits, int hidunits, int outunits,
00073             double lrate, double mrate);
00074 
00075   //! Initialize a two-weight-layer network with small random weights
00076   //! momentum is unimplemented
00077   void init(int inunits, int hidunits, int outunits,
00078             double lrate, double mrate);
00079 
00080   //! Initialize a two-weight-layer network with Image<double> weights
00081   void init(Image<double> wh, Image<double> wo,
0082             double lrate, double mrate);
00083 
00084   //! Initialize a three-weight-layer network with saved weights
00085   //! momentum is unimplemented
00086   void init3L(std::string wh1_file, std::string wh2_file, std::string wo_file,
00087               int inunits, int hid1units, int hid2units, int outunits,
00088               double lrate, double mrate);
00089 
00090   //! Initialize a three-weight-layer network with small random weights
00091   //! momentum is unimplemented
00092   void init3L(int inunits, int hid1units, int hid2units,
00093               int outunits, double lrate, double mrate);
00094 
00095   //! Initialize a three-weight-layer network with Image<double> weights
00096   void init3L(Image<double> wh, Image<double> wh2, Image<double> wo,
00097               double lrate, double mrate);
00098 
00099   //! Run a two-weight-layer network with passed in input
00100   Image<double> run(Image<double> input);
00101 
00102   //! Run a three-weight-layer network with passed in input
00103   Image<double> run3L(Image<double> input);
00104 
00105   //! backpropagate the error in the two-weight-layer network
00106   //! with passed in target output (note: call run first)
00107   void backprop(Image<double> target);
00108 
00109   //! backpropagate the error in the three-weight-layer network
00110   //! with passed in target output (note: call run3L first)
00111   void backprop3L(Image<double> target);
00112 
00113   //! Write the two-weight-layer network weights to a specified file
00114   void write(std::string wh_file,
00115              std::string wo_file);
00116 
00117   //! Write the three-weight-layer network weights to a specified file
00118   void write3L(std::string wh1_file,
00119                std::string wh2_file,
00120                std::string wo_file);
00121 
00122   //! Set Learning rate - for simulated annealing
00123   void setLearningRate(double newLR);
00124 
00125   //! get the most recent output
00126   inline Image<double> getOutput();
00127 
00128 private:
00129 
00130   //! apply the sigmoid transfer function to dst, modifying it in place
00131   void inPlaceSigmoid(Image<double>& dst);
00132 
00133   //! input layer
00134   Image<double> itsInputLayerPotential;
00135 
00136   //! hidden layer
00137   Image<double> itsHiddenLayerPotential;
00138   Image<double> itsHiddenLayerWeight;
00139   Image<double> itsHiddenLayerMomentum;
00140   Image<double> itsHiddenLayer2Potential;
00141   Image<double> itsHiddenLayer2Weight;
00142 
00143   //! output layer
00144   Image<double> itsOutputLayerPotential;
00145   Image<double> itsOutputLayerWeight;
00146   Image<double> itsOutputLayerMomentum;
00147 
00148   // error information
00149   Image<double> itsError;
00150   Image<double> itsOutputLayerDelta;
00151   Image<double> itsHiddenLayerDelta;
00152   Image<double> itsHiddenLayer2Delta;
00153 
00154   //! the network's learning rate
00155   double itsLearningRate;
00156 
00157   //! the network's momentum rate (note: momentum is unimplemented)
00158   double itsMomentumRate;
00159 
00160   //! the number of hidden layers + output layer
00161   int itsNumLayer;
00162 };
00163 
00164 // ######################################################################
00165 // Implementation for FeedForwardNetwork inline functions
00166 // ######################################################################
00167 inline Image<double> FeedForwardNetwork::getOutput()
00168 {
00169   return this->itsOutputLayerPotential;  // output-layer activations from the last run
00170 }
00171 
00172 #endif
00173 
00174 // ######################################################################
00175 /* So things look consistent in everyone's emacs... */
00176 /* Local Variables: */
00177 /* indent-tabs-mode: nil */
00178 /* End: */
Generated on Sun May 8 08:40:39 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3