/*!@file CINNIC/contourRun2.H CINNIC classes */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: T Nathan Mundhenk <mundhenk@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/CINNIC/contourRun2.H $
// $Id: contourRun2.H 9412 2008-03-10 23:10:15Z farhan $
//

#ifndef CONTOURRUN2_H_DEFINED
#define CONTOURRUN2_H_DEFINED

#include "CINNIC/contourNeuron.H"
#include "CINNIC/contourNeuronProp2.H"

#include "Raster/Raster.H"

#include "Image/Pixels.H"
#include "Image/Image.H"

#include "Util/Timer.H"

#include <vector>

// ############################################################
// ############################################################
// ##### ---CINNIC2---
// ##### Contour Integration:
// ##### T. Nathan Mundhenk nathan@mundhenk.com
// ############################################################
// ############################################################
//CLASSES:
//NeuronChargeHold
//ContourNeuronProp
//RunNeuron

//###############################################################
//Holder of the basic properties of each contour neuron
//HOW THE HECK DOES THIS CLASS WORK...
//This class stores and releases charges for the CINNIC neurons.
//Other neurons will place a charge into this neuron using the
//Charge method. These charges are stored in a resizable vector
//in a container class called NeuronChargeHold, which is in turn
//stored in a vector called HistoryVector.
//
//When it is a neuron's turn to run, it does several things. This
//class's part is to take a charge from another neuron that is running
//and store it in this neuron. When this neuron runs, it then handles
//requests to hand back the charges stored here, so that this neuron
//can pass an appropriately sized charge on to another neuron.
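
// A minimal sketch of that hand-off, for illustration only: the per-neuron
// charge interface actually lives in CINNIC/contourNeuronProp2.H, and the
// accessor name used here is an assumption, not the real API.
//
//   // neuron A fires and deposits energy into neuron B:
//   neuronB.Charge(energyFromA);       // kept as a NeuronChargeHold entry
//                                      // inside B's HistoryVector
//   // when B's turn comes, it reads back what it accumulated and uses that
//   // to size the charge it passes on to its own neighbors:
//   FLOAT accumulated = neuronB.getCharge();   // hypothetical accessor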
//##########################################################################

//This class will run the CINNIC neuron on a given Matrix of angle values
//to another node. This is factored with timestep.
//static ContourNeuronPropVec (*NeuronMatrix)[ImageSizeX][ImageSizeY];

#define CONTOUR_RUN2_DEC template <unsigned short kernelSize,   \
                                   unsigned short scales,       \
                                   unsigned short orientations, \
                                   unsigned short iterations,   \
                                   class FLOAT, class INT>

#define CONTOUR_RUN2_CLASS contourRun2<kernelSize,scales,orientations, \
                                       iterations,FLOAT,INT>
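
/* A hedged usage sketch for the class declared below: it only strings
   together public methods declared later in this header. The template
   arguments and input variables (imageMap, neuronTemplate, config,
   groupImage, numGroups, iteration, groupTop) are made up for the example
   and are not defined anywhere in this file.

     contourRun2<5, 1, 4, 10, float, int> cr;
     cr.CONTtoggleFrameSeries(false);            // single image, not a movie
     cr.CONTcontourRunMain(imageMap, neuronTemplate, config,
                           groupImage, numGroups, iteration, groupTop);
     Image<float> sal = cr.CONTgetSMI(cr.CONTgetCurrentIter());
*/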

//! Run the hyper column on a given image with a given connection template
/*! This is the central iterative process in CINNIC. It takes as input
    a 3D ImageMap and a 4D PropHold made from ContourNeuronCreate. It matches
    each neuron in the hyper column against each other neuron in the
    hyper column. Energy is transferred to the other neuron based on a product
    of the two neurons' current excitation from the ImageMap and the weight of
    their connections. A negative weight signifies inhibition. Energies are
    allowed to build iteratively in an upper layer SalMap built into this class.
*/
CONTOUR_RUN2_DEC
class contourRun2
{
private:
  //! this is set to true if we use a frame series, not a single image
  bool CONTuseFrameSeries;
  const char* CONTimageOutDir;
  const char* CONTimageSaveTo;
  const char* CONTlogSaveTo;
  unsigned short CONTgroups;
  INT CONTadaptType, CONTdoFastPlast, CONTlastIterOnly, CONTdoTableOnly;
  INT CONTsetImageSizeX, CONTsetImageSizeY;
  INT CONTdoGroupSupression, CONTdoPassThrough;
  //! the current iteration frame in CINNIC
  INT CONTcurrentIter;
  //! if we are using frames this is different from CONTcurrentIter
  INT CONTcurrentFrame;
  //! store the current iteration frame in CINNIC
  INT CONTstoreCurrentIter;
  //! counts individual iterations over neurons
  unsigned long CONTiterCounter;
  FLOAT CONTtimestep, CONTmaxEnergy;
  FLOAT CONTupperLimit, CONTfastPlast;
  FLOAT CONTgroupTop, CONTgroupBottom, CONTsupressionAdd;
  FLOAT CONTsupressionSub, CONTadaptNeuronThresh, CONTorThresh;
  FLOAT CONTadaptNeuronMax, CONTleak, CONTinitialGroupVal;
  FLOAT CONTpassThroughGain;
  FLOAT CONTenergy, CONTexcMult;
  //! how quickly we lose fast plasticity (decay term)
  FLOAT CONTplastDecay;
  const Point2D<int> *point;
  //! stores values for subsequent iterations
  Image<FLOAT> CONTsalMap;
  //! stores the map of group membership for hyper columns
  Image<FLOAT> CONTgroup;

  //! base activity for this group - suppression
  std::vector<FLOAT> CONTgroupMod;
  //! base activity for this group - 1/suppression
  std::vector<FLOAT> CONTgroupMod2;
  //! holds the finite difference for this group from t-1 to t
  std::vector<FLOAT> CONTgroupDelta;
  std::vector< Image<FLOAT> > CONTsalMapIterations;
  std::vector< Image<FLOAT> > CONTgroupMap;
  std::vector< Image<FLOAT> > CONTimageOpt;
  std::vector< Image<FLOAT> > CONTcombinedSalMap;

  ContourNeuronCreate<FLOAT> *CONTneuron; // copy of the generic neuron
  PropHold *Neurons;                      // hold a property set for each neuron

  //! Hold the values for each neuron over time
  std::vector< std::vector< std::vector< std::vector<
    ContourNeuronProp2<FLOAT,INT> > > > > CONTneuronMatrix;

  //! Holds temporally static values for each neuron
  std::vector< std::vector< std::vector<
    staticContourNeuronProp<FLOAT,INT> > > > CONTstaticNeuronMatrix;

  //*********************************************************************
  // Private member functions
  //*********************************************************************

  //! reset the charges at iteration iter
  void CONTresetCharge(const INT iter);
  //! Set up initial values and optimizations; run before runImage
  void CONTpreImage(const std::vector< Image<FLOAT> > &imageMap,
                    const ContourNeuronCreate<FLOAT> &N);
  //! sets up CONTimageOpt
  void CONTsetImageOpt(const std::vector< Image<FLOAT> > &imageMap, bool resize);

  //! Like runImage except that a sigmoid is used to approximate the neuron firing rate
  /*! Instead of being used directly, the neuron firing potential is
      routed through a sigmoid which approximates the rate of firing.
      @param imageMap This is the ImageMap of orientations
      @param N This is the contour neuron template
  */
  void CONTrunImageSigmoid(const std::vector< Image<FLOAT> > &imageMap,
                           const ContourNeuronCreate<FLOAT> &N,
                           const INT iter, const INT lastIter,
                           const INT nextIter, const bool init);
  //! calculate top level saliency map from hyper-column
  void CONTcalcSalMap(const std::vector< Image<FLOAT> > &imageMap,
                      const INT iter);
  //! process saliency map incl. leak and storage
  void CONTprocessSalMap(const std::vector< Image<FLOAT> > &imageMap,
                         const INT iter);
  //! Calculate group suppression changes
  /*! find new group weights based on the delta of excitation
      @param imageMap This is the ImageMap of orientations
      @param N This is the contour neuron template
  */
  void CONTcalcGroups(const std::vector< Image<FLOAT> > &imageMap,
                      const INT iter, const INT lastIter,
                      const bool init);
  //! set pointers from static neurons to their group membership
  void CONTsetGroupPointers(const std::vector< Image<FLOAT> > &imageMap,
                            const INT a, const INT iter);
  //! compute fast plasticity on neurons
  inline void CONTfindFastPlasticity(const std::vector< Image<FLOAT> > &imageMap,
                                     const INT a, const INT iter);
  //! compute the pass through gain on this image
  inline void CONTfindPassThroughGain(const std::vector< Image<FLOAT> > &imageMap,
                                      const INT a, const INT iter,
                                      const INT nextIter);
  //! one iterative process, called for example from runImage. Math using convolution
  /*! This accounts for one iteration at a time of the hyper column.
      It will take the ImageMap and template neuron from ContourNeuronCreate
      and calculate this iteration's energy values.
      @param iter This is the current iteration number
      @param imageMap The 3D image map of orientations
      @param N The template neuron
      @param node The node number of this process,
             defaults to -1 for single process operations
  */
  inline void CONTiterateConvolve(const std::vector< Image<FLOAT> > &imageMap,
                                  const ContourNeuronCreate<FLOAT> &N,
                                  const INT node, const INT iter,
                                  const INT lastIter, const bool init);
  //! This is the second part of iterate convolve using the convolutions
  /*! This accounts for one iteration at a time of the hyper column.
      It will take the ImageMap and template neuron from ContourNeuronCreate
      and calculate this iteration's energy values.
      NOTE: This version excludes cascades
      @param iter This is the current iteration number
      @param imageMap The 3D image map of orientations
      @param N The template neuron
      @param node The node number of this process, defaults to -1
             for single process operations
  */
  inline void CONTconvolveSimpleInit(
                      const std::vector< Image<FLOAT> > &imageMap,
                      const ContourNeuronCreate<FLOAT> &N,
                      const INT a, const INT b, const INT node,
                      const INT iter, const INT nextIter);
  //! This is the second part of iterate convolve using the convolutions
  /*! This accounts for one iteration at a time of the hyper column.
      It will take the ImageMap and template neuron from ContourNeuronCreate
      and calculate this iteration's energy values.
      NOTE: This version excludes cascades
      @param iter This is the current iteration number
      @param imageMap The 3D image map of orientations
      @param N The template neuron
      @param node The node number of this process, defaults to -1
             for single process operations
  */
  inline void CONTconvolveSimpleOld(
                      const std::vector< Image<FLOAT> > &imageMap,
                      const ContourNeuronCreate<FLOAT> &N,
                      const INT a, const INT b, const INT node,
                      const INT iter, const INT nextIter);
  //! This is the second part of iterate convolve using the convolutions
  /*! This accounts for one iteration at a time of the hyper column.
      It will take the ImageMap and template neuron from ContourNeuronCreate
      and calculate this iteration's energy values.
      NOTE: This version excludes cascades
      @param iter This is the current iteration number
      @param imageMap The 3D image map of orientations
      @param N The template neuron
      @param node The node number of this process, defaults to -1
             for single process operations
  */
  inline void CONTconvolveSimple(const std::vector< Image<FLOAT> > &imageMap,
                                 const ContourNeuronCreate<FLOAT> &N,
                                 const INT a, const INT node,
                                 const INT iter, const INT nextIter);
  //! This is the second part of iterate convolve using the convolutions
  /*! This accounts for one iteration at a time of the hyper column.
      It will take the ImageMap and template neuron from ContourNeuronCreate
      and calculate this iteration's energy values.
      NOTE: This version excludes cascades
      @param iter This is the current iteration number
      @param imageMap The 3D image map of orientations
      @param N The template neuron
      @param node The node number of this process, defaults to -1
             for single process operations
  */
  inline void CONTconvolveSimpleFrames(
                      const std::vector< Image<FLOAT> > &imageMap,
                      const ContourNeuronCreate<FLOAT> &N,
                      const INT a, const INT node,
                      const INT iter, const INT nextIter);
  //! Calculate f with the sigmoid described in Koch, Biophysics of Computation
  /*! @param beta This is a positive constant for the gain rate
      @param v This is the generator potential
      For beta, higher numbers mean a faster gain. A beta of 1
      will start at approx -1 and go to 1; with a v of 0,
      f will be .5 in all cases.
  */
  FLOAT CONTsigmoid(const FLOAT beta, const FLOAT v) const;
  //! Another sigmoid function
  /*! @param beta this is the ceiling for the function
      @param v this is the potential input
  */
  FLOAT CONTsigmoid2(const FLOAT beta, const FLOAT v) const;
  //! This method is used to pre-process for the sigmoid based on a threshold
  /*! @param beta The beta param in the sigmoid
      @param v The generator potential at its current level
      @param thresh This is the max threshold v should reach
  */
  FLOAT CONTpreSigmoid(const FLOAT v, const FLOAT thresh,
                       const FLOAT beta = 1) const;
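
  /* The exact functional form is implemented in contourRun2.C, not here. The
     behavior documented above for CONTsigmoid (gain controlled by beta,
     f = .5 at v = 0) is consistent with a standard logistic, which as an
     assumption would look something like

       f(v) = 1 / (1 + exp(-beta * v))

     This is only an illustration of the documented behavior, not a statement
     of the actual implementation. */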
  //! reset the energy matrix
  void CONTresetMatrix();
  //! set the image size
  void CONTsetImageSize(const INT X, const INT Y);
  //! Set configurations for this run
  /*! @param config This is the readConfig object that contains the config file */
  void CONTsetConfig(readConfig &config);

public:
  // ######################################################################
  // Public members etc.
  // ######################################################################
  const static unsigned short CONTkernelSize   = kernelSize;
  const static unsigned short CONTscales       = scales;
  const static unsigned short CONTorientations = orientations;
  const static unsigned short CONTiterations   = iterations;

  // the values for these static floating-point constants have to be
  // given out of line (see note on
  // http://gcc.gnu.org/onlinedocs/gcc/Deprecated-Features.html: "G++
  // allows static data members of const floating-point type to be
  // declared with an initializer in a class definition. The standard
  // only allows initializers for static members of const integral
  // types and const enumeration types so this extension has been
  // deprecated and will be removed from a future version.").

  //! image values smaller than this are ignored to optimize
  const static FLOAT CONTsmallNumber;
  //! maximum value for group suppression
  const static FLOAT CONTmaxGroupSupress;
  //! minimum value for group suppression
  const static FLOAT CONTminGroupSupress;
  //! maximum value for fast plasticity
  const static FLOAT CONTmaxFastPlasticity;
  //! minimum value for fast plasticity
  const static FLOAT CONTminFastPlasticity;
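
  // As a concrete illustration of the out-of-line definitions the note above
  // calls for, the corresponding lines in the .C file would look like the
  // following (the value shown is made up; the real ones live in
  // contourRun2.C):
  //
  //   CONTOUR_RUN2_DEC
  //   const FLOAT CONTOUR_RUN2_CLASS::CONTsmallNumber = 0.001F;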
  //! default constructor
  contourRun2();
  //! default destructor
  ~contourRun2();
  //! toggles whether to use a frame series for movies
  void CONTtoggleFrameSeries(bool toggle);
  //! copy the combined sal map to this class for analysis or dumping
  void CONTcopyCombinedSalMap(const std::vector< Image<FLOAT> > &CSM);
  //! return the processed salmap for this object at iter
  Image<FLOAT> CONTgetSMI(const INT iter);
  //! find the time to total energy ratio
  void CONTderiveEnergy();
  //! This is a shortcut method to run runImage + other methods
  /*! This will run setConfig, setImageSize, resetMatrix and runImage
      for you.
      @param imageMap the processed image map of orientations from ImageMap
      @param N This is the generic neuron template from ContourNeuronCreate
      @param config This is the config file object from readConfig.C
      @param sizeX The size of the image in X
      @param sizeY The size of the image in Y
  */
  void CONTcontourRunMain(const std::vector< Image<FLOAT> > &imageMap,
                          const ContourNeuronCreate<FLOAT> &N,
                          readConfig &config,
                          const Image<FLOAT> &group,
                          const INT groups,
                          const INT iter,
                          const FLOAT groupTop);

  //! like CONTcontourRunMain, but used when running over a series of movie frames
  void CONTcontourRunFrames(const std::vector< Image<FLOAT> > &imageMap,
                            const ContourNeuronCreate<FLOAT> &N,
                            readConfig &config,
                            const Image<FLOAT> &group,
                            const INT groups,
                            const INT frame,
                            const FLOAT groupTop);
  //! display and save fast plasticity activity for an iteration
  void CONToutputFastPlasticity(INT iter);
  //! display and save group suppression activity for an iteration
  void CONToutputGroupSupression(INT iter);
  //! get the current iteration pointer for returning data
  INT CONTgetCurrentIter();
};

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */

#endif