contourRun2.C

00001 /*!@file CINNIC/contourRun2.C CINNIC classes - src3 */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00005 // University of Southern California (USC) and the iLab at USC.         //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: T Nathan Mundhenk <mundhenk@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/CINNIC/contourRun2.C $
00035 // $Id: contourRun2.C 6854 2006-07-18 18:24:42Z rjpeters $
00036 //
00037 
00038 // ############################################################
00039 // ############################################################
00040 // ##### ---CINNIC2---
00041 // ##### Contour Integration:
00042 // ##### T. Nathan Mundhenk nathan@mundhenk.com
00043 // ############################################################
00044 // ############################################################
00045 
00046 #include "CINNIC/contourRun2.H"
00047 
00048 #include "Util/Assert.H"
00049 #include "Util/log.H"
00050 
00051 #include "Image/ColorOps.H"
00052 #include "Image/ShapeOps.H"
00053 
00054 #include <cmath>
00055 #include <cstdlib>
00056 
00057 using std::vector;
00058 
00059 static float Resistance;  // pulled out of contourRun.H to eliminate warning
00060 
00061 
00062 //#################################################################
00063 CONTOUR_RUN2_DEC CONTOUR_RUN2_CLASS::contourRun2()
00064 {
00065   CONTuseFrameSeries = false;
00066 }
00067 
00068 //#################################################################
00069 CONTOUR_RUN2_DEC CONTOUR_RUN2_CLASS::~contourRun2()
00070 {
00071 }
00072 
00073 //#################################################################
00074 CONTOUR_RUN2_DEC
00075 void CONTOUR_RUN2_CLASS::CONTtoggleFrameSeries(bool toggle)
00076 {
00077   CONTuseFrameSeries = toggle;
00078 }
00079 
00080 //#################################################################
00081 CONTOUR_RUN2_DEC Image<FLOAT> CONTOUR_RUN2_CLASS::CONTgetSMI(const INT iter)
00082 {
00083   return CONTsalMapIterations[iter];
00084 }
00085 
00086 //#################################################################
00087 CONTOUR_RUN2_DEC
00088 void CONTOUR_RUN2_CLASS::CONTcopyCombinedSalMap(const std::vector< Image<FLOAT> > &CSM)
00089 {
00090   CONTcombinedSalMap = CSM;
00091 }
00092 
00093 //#################################################################
00094 CONTOUR_RUN2_DEC
00095 void CONTOUR_RUN2_CLASS::CONTsetConfig(readConfig &config)
00096 {
00097   CONTtimestep          = config.getItemValueF("timestep");
00098   CONTmaxEnergy         = config.getItemValueF("maxEnergy");
00099   Resistance            = config.getItemValueF("Resistance");
00100   CONTimageSaveTo       = config.getItemValueC("imageSaveTo");
00101   CONTlogSaveTo         = config.getItemValueC("logSaveTo");
00102   CONTupperLimit        = config.getItemValueF("upperLimit");
00103   CONTimageOutDir       = config.getItemValueC("imageOutDir");
00104   CONTgroupBottom       = config.getItemValueF("groupBottom");
00105   CONTsupressionAdd     = config.getItemValueF("supressionAdd");
00106   CONTsupressionSub     = config.getItemValueF("supressionSub");
00107   CONTadaptType         = (INT)config.getItemValueF("adaptType");
00108   CONTadaptNeuronThresh = config.getItemValueF("adaptNeuronThresh");
00109   CONTadaptNeuronMax    = config.getItemValueF("adaptNeuronMax");
00110   CONTexcMult           = config.getItemValueF("excMult");
00111   CONTleak              = config.getItemValueF("leak");
00112   CONTorThresh          = config.getItemValueF("orThresh");
00113   CONTinitialGroupVal   = config.getItemValueF("initialGroupVal");
00114   CONTfastPlast         = config.getItemValueF("fastPlast");
00115   CONTdoFastPlast       = (INT)config.getItemValueF("doFastPlast");
00116   CONTdoGroupSupression = (INT)config.getItemValueF("doGroupSupression");
00117   CONTdoPassThrough     = (INT)config.getItemValueF("doPassThrough");
00118   CONTlastIterOnly      = (INT)config.getItemValueF("lastIterOnly");
00119   CONTdoTableOnly       = (INT)config.getItemValueF("doTableOnly");
00120   CONTpassThroughGain   = config.getItemValueF("passThroughGain");
00121   CONTplastDecay        = config.getItemValueF("plastDecay");
00122 }
00123 
00124 //#################################################################
00125 CONTOUR_RUN2_DEC
00126 FLOAT CONTOUR_RUN2_CLASS::CONTsigmoid(const FLOAT beta, const FLOAT v) const
00127 {
00128   return (1.0f / (1.0f + pow(2.71828f, (-2.0f * (beta * v)))));
00129 }
00130 //#################################################################
00131 CONTOUR_RUN2_DEC
00132 FLOAT CONTOUR_RUN2_CLASS::CONTsigmoid2(const FLOAT beta, const FLOAT v) const
00133 {
00134   return(1.0f / (1.0f + pow(2.71828f, (-1.0f * (beta+v) ))));
00135 }
00136 //#################################################################
00137 CONTOUR_RUN2_DEC
00138 FLOAT CONTOUR_RUN2_CLASS::CONTpreSigmoid(const FLOAT v, const FLOAT thresh,
00139                                          const FLOAT beta) const
00140 {
00141   if(v >= thresh) //optimization (?)
00142   {
00143     return (thresh-1);
00144   }
00145   else
00146   {
00147     const FLOAT sig = CONTsigmoid(beta,(((v/thresh)*(2*(1/beta))-(1/beta))));
00148     return (sig*thresh);
00149   }
00150 }
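// A quick note on the math (worked out from the two functions above, not
// part of the original comments): CONTsigmoid is the logistic function
// 1/(1 + e^(-2*beta*v)). CONTpreSigmoid rescales v from [0,thresh) onto
// that sigmoid's input range and scales the result back up, so for
// v < thresh the beta factor cancels and
//
//   preSigmoid(v,thresh,beta) = thresh / (1 + e^(2 - 4*v/thresh))
//
// which maps 0 to roughly 0.12*thresh, thresh/2 to exactly thresh/2, and
// approaches roughly 0.88*thresh as v nears thresh; inputs at or above
// thresh return thresh-1.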
00151 
00152 //#################################################################
00153 CONTOUR_RUN2_DEC
00154 void CONTOUR_RUN2_CLASS::CONTsetImageSize(const INT X, const INT Y)
00155 {
00156   ASSERT((X > 0) && (Y > 0));
00157   CONTsetImageSizeY = Y;
00158   CONTsetImageSizeX = X;
00159 }
00160 
00161 //#################################################################
00162 /*! find the max energy per time slice; each time slice lasts
00163   1/CONTtimestep seconds, so CONTenergy = CONTmaxEnergy/CONTtimestep
00164  */
00165 CONTOUR_RUN2_DEC
00166 void CONTOUR_RUN2_CLASS::CONTderiveEnergy()
00167 {
00168   CONTenergy = CONTmaxEnergy/CONTtimestep;
00169 }
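// For example (illustrative numbers only, not values from any config file):
// with maxEnergy = 10 and timestep = 100, each time slice lasts 1/100 s and
// CONTenergy = 10/100 = 0.1 units of energy per slice.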
00170 
00171 //#################################################################
00172 /*! this method will reset the neural matrix. Must be called before running an image
00173  */
00174 CONTOUR_RUN2_DEC
00175 void CONTOUR_RUN2_CLASS::CONTresetMatrix()
00176 {
00177   // resize static neuron matrix
00178 
00179   std::vector<            staticContourNeuronProp<FLOAT,INT> >   smat1;
00180   std::vector<std::vector<staticContourNeuronProp<FLOAT,INT> > > smat2;
00181 
00182   staticContourNeuronProp<FLOAT,INT> CONTsprop;
00183   smat1.resize(            CONTsetImageSizeY       ,CONTsprop);
00184   smat2.resize(            CONTsetImageSizeX       ,smat1);
00185   CONTstaticNeuronMatrix.resize(CONTorientations   ,smat2);
00186 
00187   INT ID = 0;
00188 
00189   for(unsigned short i = 0; i < CONTorientations; i++)
00190   {
00191     for(INT j = 0; j < CONTsetImageSizeX; j++)
00192     {
00193       for(INT k = 0; k < CONTsetImageSizeY; k++)
00194       {
00195         CONTstaticNeuronMatrix[i][j][k].sCNP_setID(ID);
00196         ID++;
00197       }
00198     }
00199   }
00200 
00201   // resize dynamic neuron matrix
00202 
00203   std::vector<                        ContourNeuronProp2<FLOAT,INT> >     mat1;
00204   std::vector<std::vector<            ContourNeuronProp2<FLOAT,INT> > >   mat2;
00205   std::vector<std::vector<std::vector<ContourNeuronProp2<FLOAT,INT> > > > mat3;
00206 
00207   ContourNeuronProp2<FLOAT,INT>       CONTprop;
00208   mat1.resize(            CONTsetImageSizeY ,CONTprop);
00209   mat2.resize(            CONTsetImageSizeX ,mat1);
00210   mat3.resize(            CONTorientations  ,mat2);
00211   CONTneuronMatrix.resize(CONTiterations+2  ,mat3);
00212 
00213 
00214   for(unsigned short n = 0; n < CONTiterations+2; n++)
00215   {
00216     for(unsigned short i = 0; i < CONTorientations; i++)
00217     {
00218       for(INT j = 0; j < CONTsetImageSizeX; j++)
00219       {
00220         for(INT k = 0; k < CONTsetImageSizeY; k++)
00221         {
00222           CONTneuronMatrix[n][i][j][k].CNP_resetCharge();
00223           CONTneuronMatrix[n][i][j][k].CNP_linkToStaticMap(
00224                                        &CONTstaticNeuronMatrix[i][j][k]);
00225         }
00226       }
00227     }
00228   }
00229 }
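// Layout note (summarizing the resizes above): after this call the two
// matrices are indexed as
//   CONTstaticNeuronMatrix[orientation][x][y]
//   CONTneuronMatrix[iteration][orientation][x][y]   (CONTiterations+2 slots)
// and every dynamic neuron is linked to its static counterpart via
// CNP_linkToStaticMap.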
00230 
00231 //#################################################################
00232 CONTOUR_RUN2_DEC
00233 void CONTOUR_RUN2_CLASS::CONTresetCharge(const INT iter)
00234 {
00235   for(unsigned short i = 0; i < CONTorientations; i++)
00236   {
00237     for(INT j = 0; j < CONTsetImageSizeX; j++)
00238     {
00239       for(INT k = 0; k < CONTsetImageSizeY; k++)
00240       {
00241         CONTneuronMatrix[iter][i][j][k].CNP_resetCharge();
00242       }
00243     }
00244   }
00245 }
00246 
00247 //#################################################################
00248 CONTOUR_RUN2_DEC
00249 void CONTOUR_RUN2_CLASS::CONTpreImage(const std::vector< Image<FLOAT> > &imageMap,
00250                                       const ContourNeuronCreate<FLOAT> &N)
00251 {
00252   CONTiterCounter = 0;
00253   CONTderiveEnergy();
00254 
00255   CONTsalMap.resize(CONTsetImageSizeX,CONTsetImageSizeY,true);
00256   CONTsalMapIterations.resize(CONTiterations,CONTsalMap);
00257 
00258   CONTgroupMap.resize(CONTiterations,CONTsalMap);
00259   CONTimageOpt.resize(CONTorientations,imageMap[1]);
00260 
00261   // fill CONTimageOpt: each oriented image map scaled by the per-slice energy
00262   CONTsetImageOpt(imageMap,true);
00263 }
00264 
00265 //#################################################################
00266 CONTOUR_RUN2_DEC
00267 void CONTOUR_RUN2_CLASS::CONTsetImageOpt(const std::vector< Image<FLOAT> > &imageMap,
00268                                          bool resize)
00269 {
00270   for(unsigned short a = 0; a < CONTorientations; a++)
00271   {
00272     if(resize == true)
00273       CONTimageOpt[a].resize(imageMap[a].getWidth(),imageMap[a].getHeight(),ZEROS);
00274     for(INT i = 0; i < CONTsetImageSizeX; i++)
00275     {
00276       for(INT j = 0; j < CONTsetImageSizeY; j++)
00277       {
00278         CONTimageOpt[a].setVal(i,j,(imageMap[a].getVal(i,j)*CONTenergy));
00279       }
00280     }
00281   }
00282 }
00283 
00284 //#################################################################
00285 CONTOUR_RUN2_DEC
00286 void CONTOUR_RUN2_CLASS::CONTcalcSalMap(const std::vector< Image<FLOAT> > &imageMap,
00287                                         const INT iter)
00288 {
00289   //First check potentials for each column at this iteration,
00290   //reset if necessary
00293   for(INT a = 0; a < CONTorientations; a++)
00294   {
00295     for(INT i = 0; i < CONTsetImageSizeX; i++)
00296     {
00297       for(INT j = 0; j < CONTsetImageSizeY; j++)
00298       {
00299         //Add up all charges in this column
00300         //if charge is negative then make zero
00301         if(CONTneuronMatrix[iter][a][i][j].CNP_getCharge() < 0)
00302         {
00303           CONTneuronMatrix[iter][a][i][j].CNP_resetCharge();
00304         }
00305         // add this iteration plus all others combined here
00306         // i.e. here lies the integrator for each pixel!
00307         const FLOAT hold = CONTsalMap.getVal(i,j) +
00308           CONTneuronMatrix[iter][a][i][j].CNP_getCharge();
00309 
00310         CONTsalMap.setVal(i,j,hold);
00311       }
00312     }
00313   }
00314 }
00315 
00316 //#################################################################
00317 CONTOUR_RUN2_DEC
00318 void CONTOUR_RUN2_CLASS::CONTprocessSalMap(
00319                                      const std::vector< Image<FLOAT> > &imageMap,
00320                                      const INT iter)
00321 {
00322   //CONTsalMapIterations[iter].resize(CONTsetImageSizeX,CONTsetImageSizeY,true);
00323   //CONTgroupMap[iter].resize(CONTsetImageSizeX,CONTsetImageSizeY,true);
00324   for(INT i = 0; i < CONTsetImageSizeX; i++)
00325   {
00326     for(INT j = 0; j < CONTsetImageSizeY; j++)
00327     {
00328       // leak this neuron
00329       FLOAT hold1 = CONTsalMap.getVal(i,j) - CONTleak;
00330       // bottom this neuron to 0 charge if negative
00331       if(hold1 < 0){hold1 = 0;}
00332       // Set the sal map to this value
00333       CONTsalMap.setVal(i,j,hold1);
00334 
00335       // Compute sigmoid of column this iteration
00336       const FLOAT hold2 =
00337         CONTpreSigmoid(CONTsalMap.getVal(i,j),CONTupperLimit);
00338       // set output (for movie) to a value from 0 to 256
00339       CONTgroupMap[iter].setVal(i,j,hold2);
00340       // set value into this iteration normalized
00341       if((i > 0) && (j > 0))
00342       {
00343         CONTsalMapIterations[iter].setVal((i-1),(j-1)
00344                                           ,((hold2/CONTupperLimit)*255));
00345       }
00346     }
00347   }
00348 }
00349 
00350 /*
00351    OUTER LOOP / GROUP SUPPRESSION COMPUTATION
00352    Here we execute the outer portions of the 6-layer loop in CINNIC
00353    for pseudo-convolution.
00354    We also compute the group suppression based upon each group's
00355    overall change in total activity (its derivative)
00356 */
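// Condensed from CONTcalcGroups below (adaptType == 1), as a quick
// reference rather than a substitute for the code:
//   delta[g]  = sum over pixels (i,j) in group g of
//               groupMap[iter](i,j) - groupMap[lastIter](i,j)
//   if (delta[g] > CONTgroupTop)
//       CONTgroupMod[g] += CONTsupressionAdd * (delta[g] - CONTgroupTop)
//   if (delta[g] < CONTgroupBottom)
//       CONTgroupMod[g] -= CONTsupressionSub * |CONTgroupBottom - delta[g]|
//   CONTgroupMod[g] is then clamped to
//   [CONTminGroupSupress, CONTmaxGroupSupress]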
00357 //#################################################################
00358 CONTOUR_RUN2_DEC
00359 void CONTOUR_RUN2_CLASS::CONTcalcGroups(const std::vector< Image<FLOAT> > &imageMap,
00360                                         const INT iter, const INT lastIter,
00361                                         const bool init)
00362 {
00363   // calculate group DELTA if group adaptation selected
00364   for(INT i = 0; i < CONTgroups; i++)
00365   {
00366     CONTgroupDelta[i] = 0;
00367   }
00368 
00369   if((init == false) && (CONTadaptType == 1))
00370   {
00371     for(INT i = 0; i < CONTsetImageSizeX; i++)
00372     {
00373       for(INT j = 0; j < CONTsetImageSizeY; j++)
00374       {
00375         //what group is this column in?
00376         const INT hold = (INT)CONTgroup.getVal(i,j);
00377         //what was the last iteration
00378         // find delta value for sig. on column, add to group
00379         // That is, what has this group changed by (derivative)
00380         CONTgroupDelta[hold] += CONTgroupMap[iter].getVal(i,j) -
00381                                 CONTgroupMap[lastIter].getVal(i,j);
00382       }
00383     }
00384   }
00385 
00386   // modify suppression values using groups
00387   if(CONTadaptType == 1)
00388   {
00389     for(INT g = 0; g < CONTgroups; g++)
00390     {
00391       // if values are too big then add suppression
00392       // this is done per pixel group
00393       if(CONTgroupDelta[g] > CONTgroupTop)
00394       {
00395         CONTgroupMod[g] += CONTsupressionAdd *
00396                            (CONTgroupDelta[g] - CONTgroupTop);
00397         CONTgroupMod2[g] = 1 / CONTgroupMod[g];
00398       }
00399       // if values are too small then remove suppression
00400       if(CONTgroupDelta[g] < CONTgroupBottom)
00401       {
00402         CONTgroupMod[g] -= CONTsupressionSub *
00403           fabs(CONTgroupBottom - CONTgroupDelta[g]);
00404       }
00405       // bound group suppression
00406       if(CONTgroupMod[g] > CONTmaxGroupSupress)
00407         CONTgroupMod[g] = CONTmaxGroupSupress;
00408       else if(CONTgroupMod[g] < CONTminGroupSupress)
00409         CONTgroupMod[g] = CONTminGroupSupress;
00410     }
00411   }
00412   CONTiterCounter = 0;
00413 }
00414 
00415 //#################################################################
00416 CONTOUR_RUN2_DEC
00417 void CONTOUR_RUN2_CLASS::CONTsetGroupPointers(
00418                                      const std::vector< Image<FLOAT> > &imageMap,
00419                                      const INT a, const INT iter)
00420 {
00421   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00422   {
00423     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00424     {
00425       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00426       {
00427         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00428         CONTstaticNeuronMatrix[a][i][j].
00429           sCNP_setGroupMod(&CONTgroupMod[thisGroup]);
00430       }
00431     }
00432   }
00433 }
00434 
00435 //#################################################################
00436 CONTOUR_RUN2_DEC inline
00437 void CONTOUR_RUN2_CLASS::CONTfindFastPlasticity(
00438                                      const std::vector< Image<FLOAT> > &imageMap,
00439                                      const INT a, const INT iter)
00440 {
00441   const FLOAT min = CONTminFastPlasticity;
00442   const FLOAT max = CONTmaxFastPlasticity;
00443   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00444   {
00445     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00446     {
00447       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00448       {
00449         // FAST PLASTICITY
00450         //MUST KILL OTHER NEURONS!!!! - fast plasticity
00451         //sigmoid this at some point ?
00452         FLOAT plast =
00453           CONTneuronMatrix[iter][a][i][j].CNP_getCharge()*CONTfastPlast -
00454           CONTplastDecay;
00455         if(plast < min)
00456         {
00457            plast = min;
00458         }
00459         // upper bound on fast plasticity
00460         else if(plast > max)
00461         {
00462           plast = max;
00463         }
00464         CONTneuronMatrix[iter][a][i][j].CNP_setFastPlast(plast);
00465       }
00466     }
00467   }
00468 }
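// In short (restating the loop above): for every active neuron,
//   plast = clamp(charge * CONTfastPlast - CONTplastDecay,
//                 CONTminFastPlasticity, CONTmaxFastPlasticity)
// and that value becomes the neuron's fast-plasticity gain for this
// iteration via CNP_setFastPlast.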
00469 
00470 //#################################################################
00471 CONTOUR_RUN2_DEC inline
00472 void CONTOUR_RUN2_CLASS::CONTfindPassThroughGain(
00473                                      const std::vector< Image<FLOAT> > &imageMap,
00474                                      const INT a, const INT iter,
00475                                      const INT nextIter)
00476 {
00477   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00478   {
00479     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00480     {
00481       //LINFO("%d,%d",i,j);
00482       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00483       {
00484         // PASS THROUGH GAIN
00485         //LINFO("OK");
00486         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00487         //LINFO("AA");
00488         //LINFO("This Group %d",thisGroup);
00489         const FLOAT passThrough = imageMap[a].getVal(i,j) *
00490                                   (CONTpassThroughGain /
00491                                   ((CONTgroupMod[thisGroup] * 5) - 4));
00492         //LINFO("BB");
00493         // set pass through gain for next iter
00494         CONTneuronMatrix[nextIter][a][i][j].CNP_chargeSimple(passThrough);
00495       }
00496     }
00497   }
00498 }
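// In short (restating the loop above): for every active neuron the input
// itself leaks forward into the next iteration as
//   passThrough = imageMap[a](i,j) * CONTpassThroughGain
//                 / ((CONTgroupMod[group] * 5) - 4)
// so a larger group modifier reduces the pass-through charge.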
00499 
00500 //#################################################################
00501 CONTOUR_RUN2_DEC
00502 void CONTOUR_RUN2_CLASS::CONTrunImageSigmoid(
00503                              const std::vector< Image<FLOAT> > &imageMap,
00504                              const ContourNeuronCreate<FLOAT> &N,
00505                              const INT iter, const INT nextIter,
00506                              const INT lastIter, const bool init)
00507 {
00508   LINFO("lastIter %d, iter %d, nextIter %d",lastIter,iter,nextIter);
00509   Timer tim;
00510   tim.reset();
00511   int t1,t2,t3;
00512   int t0 = tim.get();  // to measure display time
00513   LINFO("Calculating Salmap");
00514   CONTcalcSalMap(imageMap,iter);
00515   t1 = tim.get();
00516   t2 = t1 - t0; t3 = t2;
00517   LINFO("TIME: %d ms Slice: %d ms",t2,t3);
00518   LINFO("Processing Salmap");
00519   CONTprocessSalMap(imageMap,iter);
00520   t1 = tim.get();
00521   t3 = t2; t2 = t1 - t0; t3 = t2 - t3;
00522   LINFO("TIME: %d ms Slice: %d ms",t2,t3);
00523   if(CONTdoGroupSupression == 1)
00524   {
00525     LINFO("Calculating groups");
00526     CONTcalcGroups(imageMap,iter,lastIter,init);
00527     t1 = tim.get();
00528     t3 = t2; t2 = t1 - t0; t3 = t2 - t3;
00529     LINFO("TIME: %d ms Slice: %d ms",t2,t3);
00530   }
00531   LINFO("Running Pseudo Convolution");
00532   CONTiterateConvolve(imageMap,N,-1,iter,nextIter,init);
00533   t1 = tim.get(); t3 = t2; t2 = t1 - t0; t3 = t2 - t3;
00534   LINFO("TIME: %d ms Slice: %d ms",t2,t3);
00535 }
00536 
00537 
00538 //#################################################################
00539 CONTOUR_RUN2_DEC
00540 void CONTOUR_RUN2_CLASS::CONTiterateConvolve(
00541                                      const std::vector< Image<FLOAT> > &imageMap,
00542                                      const ContourNeuronCreate<FLOAT> &N,
00543                                      const INT node, const INT iter,
00544                                      const INT nextIter, const bool init)
00545 {
00546   //RUN hypercolumn get charges for i put charges for i+1
00547   for(unsigned short a = 0; a < CONTorientations; a++) //this neuron's angle
00548   {
00549     Raster::VisuFloat(CONTimageOpt[a], FLOAT_NORM_0_255,
00550                       sformat("input1.%06d.%d.%d.out.pgm",CONTcurrentFrame,a,
00551                               CONTsetImageSizeX));
00552     Raster::VisuFloat(imageMap[a], FLOAT_NORM_0_255,
00553                       sformat("input2.%06d.%d.%d.out.pgm",CONTcurrentFrame,a,
00554                               CONTsetImageSizeX));
00555     //LINFO("A");
00556     if(init == true)
00557       CONTsetGroupPointers(imageMap,a,iter);
00558     //LINFO("B");
00559     if(CONTdoFastPlast == 1)
00560       CONTfindFastPlasticity(imageMap,a,iter);
00561     //LINFO("C");
00562     if(CONTdoPassThrough == 1)
00563       CONTfindPassThroughGain(imageMap,a,iter,nextIter);
00564     if(init == true)
00565     {
00566       //other neuron's angle
00567       for(unsigned short b = 0; b < CONTorientations; b++)
00568       {
00569         //CONTconvolveSimpleOld(imageMap,N,a,b,node,iter);
00570         // LINFO("D");
00571         CONTconvolveSimpleInit(imageMap,N,a,b,node,iter,nextIter);
00572       }
00573     }
00574     else
00575     {
00576       // LINFO("D2");
00577       if(CONTuseFrameSeries == false)
00578         CONTconvolveSimple(imageMap,N,a,node,iter,nextIter);
00579       else
00580         CONTconvolveSimpleFrames(imageMap,N,a,node,iter,nextIter);
00581     }
00582   }
00583 }
00584 
00585 /*
00586 
00587 ---------Pseudo-Convolution Core for CINNIC. Current version-----------
00588 
00589 This is the meat of CINNIC. Here the hypercolumn is looped over
00590    for all neurons and interactions. While the main loop is only
00591    6 nested levels deep, multiple optimizations are added to test
00592    whether a neuron should be used at all; for instance, if it is zero, skip it.
00593 
00594 */
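// A condensed view of the pseudo-convolution (see CONTconvolveSimpleInit
// below; this is a sketch, not a drop-in replacement): for each neuron
// (a,i,j) passing the threshold tests, each other orientation b, and each
// kernel offset (k,l) whose target (InspX,InspY) falls inside the image,
//
//   weight = CONTimageOpt[a](i,j)                 // = imageMap[a]*CONTenergy
//          * imageMap[b](InspX,InspY)
//          * N.FourDNeuralMap[a][b][k][l].angABD
//
// The weight goes through CNP_computeSupress when angABD < 0 (group
// suppression applies) or CNP_computeExcite otherwise, and the result is
// charged onto CONTneuronMatrix[nextIter][b][InspX][InspY].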
00595 
00596 //#################################################################
00597 CONTOUR_RUN2_DEC
00598 void CONTOUR_RUN2_CLASS::CONTconvolveSimpleInit(
00599                                     const std::vector< Image<FLOAT> > &imageMap,
00600                                     const ContourNeuronCreate<FLOAT> &N,
00601                                     const INT a, const INT b, const INT node,
00602                                     const INT iter, const INT nextIter)
00603 {
00604   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00605   {
00606     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00607     {
00608       if((imageMap[a].getVal(i,j) > CONTsmallNumber) ||
00609          (CONTuseFrameSeries == true))   //optimization
00610       {
00611         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00612         //Other Neuron's position X
00613         for(INT k = 0; k <= CONTkernelSize; k++)
00614         {
00615           //Current position plus its field - center
00616           const INT InspX = i + (k - (INT)XCenter);
00617           if(InspX >= 0)
00618           {
00619             if(InspX < CONTsetImageSizeX)
00620             {
00621               //Other Neuron's position Y
00622               for(INT l = 0; l <= CONTkernelSize; l++)
00623               {
00624                 const INT InspY = j - (l-(INT)YCenter);
00625                 if(InspY >= 0) //stay inside of image
00626                 {
00627                   if(InspY < CONTsetImageSizeY)
00628                   {
00629                     //check that this element != 0
00630                     if(N.FourDNeuralMap[a][b][k][l].zero)
00631                     {
00632                       // optimization
00633                       if(imageMap[b].getVal(InspX,InspY) > CONTorThresh )
00634                       {
00635                         FLOAT hold;
00636                         bool polarity;
00637                         // this is done this way to optimize the code
00638                         // for multiple
00639                         // iterations, by storing some of the first
00640                         // iteration's results in an array
00641 
00642                         // ## it is important to note that ImageOpt
00643                         // = Image*energy, thus it's an optimization;
00644                         // if this is a negative number,
00645                         // then suppression may be modified
00646 
00647                         // FORMULA HERE
00648                         // orient filtered image1 * orient filtered image2
00649                         // * their excitation
00650                         const FLOAT weight =
00651                           ((CONTimageOpt[a].getVal(i,j) *
00652                             imageMap[b].getVal(InspX,InspY)) *
00653                            N.FourDNeuralMap[a][b][k][l].angABD);
00654                         //CONTstoreVal[CONTiterCounter] =
00655                         // apply group suppression if < 0
00656                         if(N.FourDNeuralMap[a][b][k][l].angABD < 0)
00657                         {
00658                           polarity = true;
00659                           hold =
00660                             CONTneuronMatrix[iter][a][i][j].
00661                             CNP_computeSupress(weight,CONTgroupMod[thisGroup]);
00662                           /* old
00663                           hold =
00664                             CONTgroupMod[thisGroup] *
00665                             mod * CONTstoreVal[CONTiterCounter];
00666                           */
00667                         }
00668                         else // value is > 0, no group suppression
00669                         {
00670                           polarity = false;
00671                           hold =
00672                             CONTneuronMatrix[iter][a][i][j].
00673                             CNP_computeExcite(weight,CONTgroupMod[thisGroup]);
00674                         }
00675                         // set energy prior to sal map
00676                         CONTneuronMatrix[nextIter][b][InspX][InspY].
00677                           CNP_chargeSimple(hold);
00678                         //! set connection between these neurons as active
00679                         if(CONTuseFrameSeries == false)
00680                         {
00681                           CONTstaticNeuronMatrix[a][i][j].
00682                             sCNP_insertStoreList((unsigned char)b,
00683                                                  (unsigned char)InspX,
00684                                                  (unsigned char)InspY,
00685                                                  polarity,weight);
00686                         }
00687                         else
00688                         {
00689                           CONTstaticNeuronMatrix[a][i][j].
00690                             sCNP_insertStoreList((unsigned char)b,
00691                                                  (unsigned char)InspX,
00692                                                  (unsigned char)InspY,
00693                                                  polarity,
00694                                           N.FourDNeuralMap[a][b][k][l].angABD);
00695                         }
00696                       }
00697                     }
00698                   }
00699                 }
00700               }
00701             }
00702           }
00703         }
00704       }
00705     }
00706   }
00707 }
00708 
00709 //#################################################################
00710 CONTOUR_RUN2_DEC
00711 void CONTOUR_RUN2_CLASS::CONTconvolveSimpleOld(
00712                                     const std::vector< Image<FLOAT> > &imageMap,
00713                                     const ContourNeuronCreate<FLOAT> &N,
00714                                     const INT a, const INT b, const INT node,
00715                                     const INT iter, const INT nextIter)
00716 {
00717   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00718   {
00719     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00720     {
00721       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00722       {
00723         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00724         float mod;
00725         mod = CONTneuronMatrix[iter][a][i][j].CNP_getCharge()*CONTfastPlast;
00726         if(mod < 1){mod = 1;}
00727         if(mod > 5){mod = 5;}
00728         CONTneuronMatrix[iter][a][i][j].CNP_setFastPlast(mod);
00729         float crap = imageMap[a].getVal(i,j)*
00730           (CONTpassThroughGain/((CONTgroupMod[thisGroup]*5)-4));
00731         CONTneuronMatrix[nextIter][a][i][j].CNP_chargeSimple(crap);
00732         //Other Neuron's position X
00733         for(INT k = 0; k <= CONTkernelSize; k++)
00734         {
00735           //Current position plus its field - center
00736           const INT InspX = i + (k - (INT)XCenter);
00737           if(InspX >= 0)
00738           {
00739             if(InspX < CONTsetImageSizeX)
00740             {
00741               //Other Neuron's position Y
00742               for(INT l = 0; l <= CONTkernelSize; l++)
00743               {
00744                 const INT InspY = j - (l-(INT)YCenter);
00745                 if(InspY >= 0) //stay inside of image
00746                 {
00747                   if(InspY < CONTsetImageSizeY)
00748                   {
00749                     //check that this element != 0
00750                     if(N.FourDNeuralMap[a][b][k][l].zero)
00751                     {
00752                       // optimization
00753                       if(imageMap[b].getVal(InspX,InspY) > CONTorThresh )
00754                       {
00755                         FLOAT hold;
00756                         // this is done this way to optimize the code
00757                         // for multiple
00758                         // iterations, by storing some of the first
00759                         // iteration's results in an array
00760 
00761                         // ## it is important to note that ImageOpt
00762                         // = Image*energy, thus it's an optimization;
00763                         // if this is a negative number,
00764                         // then suppression may be modified
00765 
00766                         // FORMULA HERE
00767                         // orient filtered image1 * orient filtered image2
00768                         // * their excitation
00769                         const FLOAT weight =
00770                           ((CONTimageOpt[a].getVal(i,j) *
00771                             imageMap[b].getVal(InspX,InspY)) *
00772                            N.FourDNeuralMap[a][b][k][l].angABD);
00773                         //CONTstoreVal[CONTiterCounter] =
00774                         // apply group suppression if < 0
00775                         if(N.FourDNeuralMap[a][b][k][l].angABD < 0)
00776                         {
00777                           hold =
00778                             CONTneuronMatrix[iter][a][i][j].
00779                             CNP_computeSupress(weight,CONTgroupMod[thisGroup]);
00780                           /* old
00781                           hold =
00782                             CONTgroupMod[thisGroup] *
00783                             mod * CONTstoreVal[CONTiterCounter];
00784                           */
00785                         }
00786                         else // value is > 0, no group suppression
00787                         {
00788                           hold =
00789                             CONTneuronMatrix[iter][a][i][j].
00790                             CNP_computeExcite(weight,CONTgroupMod[thisGroup]);
00791                         }
00792                         // set energy prior to sal map
00793                         CONTneuronMatrix[nextIter][b][InspX][InspY].
00794                           CNP_chargeSimple(hold);
00795                       }
00796                     }
00797                   }
00798                 }
00799               }
00800             }
00801           }
00802         }
00803       }
00804     }
00805   }
00806 }
00807 
00808 //#################################################################
00809 CONTOUR_RUN2_DEC
00810 void CONTOUR_RUN2_CLASS::CONTconvolveSimple(
00811                                     const std::vector< Image<FLOAT> > &imageMap,
00812                                     const ContourNeuronCreate<FLOAT> &N,
00813                                     const INT a, const INT node,
00814                                     const INT iter, const INT nextIter)
00815 {
00816   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00817   {
00818     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00819     {
00820       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00821       {
00822         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00823         for(unsigned int n = 0;
00824             n < CONTstaticNeuronMatrix[a][i][j].sCNP_getActiveNeuronCount();
00825             n++)
00826         {
00827           FLOAT hold;
00828           if(CONTstaticNeuronMatrix[a][i][j].sCNP_getOtherNeuronPol(n))
00829           {
00830             hold =
00831               CONTneuronMatrix[iter][a][i][j].
00832               CNP_computeSupress(CONTstaticNeuronMatrix[a][i][j].
00833                                  sCNP_getWeightStoreVal(n),
00834                                  CONTgroupMod[thisGroup]);
00835           }
00836           else // value is > 0, no group suppression
00837           {
00838             hold =
00839               CONTneuronMatrix[iter][a][i][j].
00840               CNP_computeExcite(CONTstaticNeuronMatrix[a][i][j].
00841                                 sCNP_getWeightStoreVal(n),
00842                                 CONTgroupMod[thisGroup]);
00843           }
00844           // set energy prior to sal map
00845           CONTneuronMatrix[nextIter]
00846             [CONTstaticNeuronMatrix[a][i][j].
00847              sCNP_getOtherNeuronAlpha(n)]
00848             [CONTstaticNeuronMatrix[a][i][j].
00849              sCNP_getOtherNeuron_i(n)]
00850             [CONTstaticNeuronMatrix[a][i][j].
00851              sCNP_getOtherNeuron_j(n)].
00852             CNP_chargeSimple(hold);
00853         }
00854       }
00855     }
00856   }
00857 }
00858 
00859 //#################################################################
00860 CONTOUR_RUN2_DEC
00861 void CONTOUR_RUN2_CLASS::CONTconvolveSimpleFrames(
00862                                     const std::vector< Image<FLOAT> > &imageMap,
00863                                     const ContourNeuronCreate<FLOAT> &N,
00864                                     const INT a, const INT node,
00865                                     const INT iter, const INT nextIter)
00866 {
00867 
00868   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
00869   {
00870     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
00871     {
00872       if(imageMap[a].getVal(i,j) > CONTsmallNumber) //optimization
00873       {
00874         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
00875         for(unsigned int n = 0;
00876             n < CONTstaticNeuronMatrix[a][i][j].sCNP_getActiveNeuronCount();
00877             n++)
00878         {
00879           const unsigned char other_i  =
00880             CONTstaticNeuronMatrix[a][i][j].sCNP_getOtherNeuron_i(n);
00881           const unsigned char other_j  =
00882             CONTstaticNeuronMatrix[a][i][j].sCNP_getOtherNeuron_j(n);
00883           const unsigned char other_a  =
00884             CONTstaticNeuronMatrix[a][i][j].sCNP_getOtherNeuronAlpha(n);
00885 
00886           if(imageMap[other_a].getVal(other_i,other_j) > CONTsmallNumber)
00887           {
00888             const FLOAT neuralMapWeight =
00889               CONTstaticNeuronMatrix[a][i][j].sCNP_getWeightStoreVal(n);
00890 
00891             const FLOAT weight = ((CONTimageOpt[a].getVal(i,j) *
00892                                    imageMap[other_a].getVal(other_i,other_j)) *
00893                                   neuralMapWeight);
00894 
00895             FLOAT hold;
00896             if(CONTstaticNeuronMatrix[a][i][j].sCNP_getOtherNeuronPol(n))
00897             {
00898               hold =
00899                 CONTneuronMatrix[iter][a][i][j].
00900                 CNP_computeSupress(weight,CONTgroupMod[thisGroup]);
00901             }
00902             else // value is > 0, no group suppression
00903             {
00904               hold =
00905                 CONTneuronMatrix[iter][a][i][j].
00906                 CNP_computeExcite(weight,CONTgroupMod[thisGroup]);
00907             }
00908             // set energy prior to sal map
00909             CONTneuronMatrix[nextIter][other_a][other_i][other_j].
00910               CNP_chargeSimple(hold);
00911           }
00912         }
00913       }
00914     }
00915   }
00916 }
00917 
00918 //#################################################################
00919 CONTOUR_RUN2_DEC
00920 void CONTOUR_RUN2_CLASS::CONTcontourRunMain(
00921                                         const std::vector< Image<FLOAT> > &imageMap,
00922                                         const ContourNeuronCreate<FLOAT> &N,
00923                                         readConfig &config,
00924                                         const Image<FLOAT> &group,
00925                                         const INT groups,
00926                                         const INT iter,
00927                                         const FLOAT groupTop)
00928 {
00929   bool init = false;
00930   if(iter == 0)
00931   {
00932     CONTgroupTop = groupTop;
00933     CONTgroup    = group;
00934     CONTgroups   = groups;
00935     CONTsetConfig(config);
00936     CONTsetImageSize(imageMap[1].getWidth(),imageMap[1].getHeight());
00937     CONTresetMatrix();
00938     CONTpreImage(imageMap,N);
00939     // groups can be set to either 1 or 0 depending on whether or not you
00940     // want suppression to happen only when a group reaches threshold
00941     // excitation or not.
00942     CONTgroupMod.resize(groups,CONTinitialGroupVal);
00943     CONTgroupMod2.resize(groups,CONTinitialGroupVal);
00944     CONTgroupDelta.resize(groups,0.0F);
00945     init = true;
00946   }
00947   CONTiterCounter = 0;
00948   INT lastIter = iter-1;
00949   INT nextIter = iter+1;
00950   CONTcurrentFrame = CONTcurrentIter;
00951   CONTrunImageSigmoid(imageMap,N,iter,nextIter,lastIter,init);
00952   CONToutputFastPlasticity(iter);
00953   CONToutputGroupSupression(iter);
00954 }
00955 
00956 //#################################################################
00957 CONTOUR_RUN2_DEC
00958 void CONTOUR_RUN2_CLASS::CONTcontourRunFrames(
00959                                         const std::vector< Image<FLOAT> > &imageMap,
00960                                         const ContourNeuronCreate<FLOAT> &N,
00961                                         readConfig &config,
00962                                         const Image<FLOAT> &group,
00963                                         const INT groups,
00964                                         const INT frame,
00965                                         const FLOAT groupTop)
00966 {
00967   bool init = false;
00968   CONTcurrentFrame = frame;
00969   if(frame == 1)
00970   {
00971     CONTgroupTop    = groupTop;
00972     CONTgroup       = group;
00973     CONTgroups      = groups;
00974     CONTcurrentIter = 0;
00975     CONTsetConfig(config);
00976     CONTsetImageSize(imageMap[1].getWidth(),imageMap[1].getHeight());
00977     CONTresetMatrix();
00978     CONTpreImage(imageMap,N);
00979     // groups can be set to either 1 or 0 depending on whether or not you
00980     // want suppression to happen only when a group reaches threshold
00981     // excitation or not.
00982     CONTgroupMod.resize(groups,CONTinitialGroupVal);
00983     CONTgroupMod2.resize(groups,CONTinitialGroupVal);
00984     CONTgroupDelta.resize(groups,0.0F);
00985     init = true;
00986   }
00987   CONTiterCounter = 0;
00988   INT lastIter;
00989   INT nextIter;
00990 
00991   LINFO("current %d iter %d",CONTcurrentIter,CONTiterations);
00992   if(CONTcurrentIter == (CONTiterations - 1))
00993   {
00994     lastIter = CONTiterations - 2;
00995     nextIter = 0;
00996     LINFO("1");
00997   }
00998   else if(CONTcurrentIter == 0)
00999   {
01000     lastIter = CONTiterations - 1;
01001     nextIter = 1;
01002     LINFO("2");
01003   }
01004   else
01005   {
01006     lastIter = CONTcurrentIter - 1;
01007     nextIter = CONTcurrentIter + 1;
01008     LINFO("3");
01009   }
01010   LINFO("%d iter, %d nextIter, %d lastIter",CONTcurrentIter,nextIter,lastIter);
01011   CONTresetCharge(nextIter);
01012   CONTsetImageOpt(imageMap,false);
01013   CONTrunImageSigmoid(imageMap,N,CONTcurrentIter,nextIter,lastIter,init);
01014   CONToutputFastPlasticity(CONTcurrentIter);
01015   CONToutputGroupSupression(CONTcurrentIter);
01016 
01017   // since all data is stored in the next iter, that's where the calling
01018   // methods should look for results
01019   CONTstoreCurrentIter = nextIter;
01020 
01021   if(CONTcurrentIter == (CONTiterations - 1))
01022     CONTcurrentIter = 0;
01023   else
01024     CONTcurrentIter++;
01025 }
01026 
01027 //#################################################################
01028 CONTOUR_RUN2_DEC
01029 void CONTOUR_RUN2_CLASS::CONToutputFastPlasticity(INT iter)
01030 {
01031   Image<float> output;
01032   output.resize(CONTsetImageSizeX,CONTsetImageSizeY);
01033   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01034   {
01035     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01036     {
01037       output.setVal(i,j,0.0F);
01038     }
01039   }
01040 
01041   // sum image values for each hyper column
01042   for(INT a = 0; a < CONTorientations; a++)
01043   {
01044     for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01045     {
01046       for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01047       {
01048         output.setVal(i,j,CONTneuronMatrix[iter][a][i][j].CNP_getFastPlast() +
01049                       output.getVal(i,j));
01050       }
01051     }
01052   }
01053 
01054   // find max value to normalize image
01055   float maxVal = 0;
01056   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01057   {
01058     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01059     {
01060       if(output.getVal(i,j) > maxVal){ maxVal = output.getVal(i,j);}
01061     }
01062   }
01063 
01064   // create image with blue/green normalized by max value possible and
01065   // red normalized by max value observed. This normalizes the output
01066   // but gives us an idea of scale. Scale is higher if output is more white.
01067   Image<PixRGB<float> > RGoutput;
01068   RGoutput.resize(CONTsetImageSizeX,CONTsetImageSizeY);
01069   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01070   {
01071     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01072     {
01073       const FLOAT val1 = (output.getVal(i,j)/
01074                           (CONTmaxFastPlasticity*CONTorientations)) * 255.0F;
01075       const FLOAT val2 = (output.getVal(i,j)/maxVal) * 255.0F;
01076       const PixRGB<float> pix(val2,val1,val1);
01077       RGoutput.setVal(i,j,pix);
01078     }
01079   }
01080 
01081   RGoutput = rescale(RGoutput,CONTsetImageSizeX*4,CONTsetImageSizeY*4);
01082 
01083   Raster::VisuRGB(RGoutput, sformat("fastPlast.%d.%06d.out.ppm",
01084                                     CONTsetImageSizeX,CONTcurrentFrame));
01085 }
01086 
01087 //#################################################################
01088 CONTOUR_RUN2_DEC
01089 void CONTOUR_RUN2_CLASS::CONToutputGroupSupression(INT iter)
01090 {
01091   Image<float> output;
01092   output.resize(CONTsetImageSizeX,CONTsetImageSizeY);
01093   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01094   {
01095     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01096     {
01097       output.setVal(i,j,0.0F);
01098     }
01099   }
01100 
01101   for(INT a = 0; a < CONTorientations; a++)
01102   {
01103     for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01104     {
01105       for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01106       {
01107         const INT thisGroup = (INT)CONTgroup.getVal(i,j);
01108         output.setVal(i,j,CONTgroupMod[thisGroup] + output.getVal(i,j));
01109       }
01110     }
01111   }
01112 
01113   // find max value to normalize image
01114   float maxVal = 0;
01115   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01116   {
01117     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01118     {
01119       if(output.getVal(i,j) > maxVal){ maxVal = output.getVal(i,j);}
01120     }
01121   }
01122 
01123   // create image with blue/green normalized by max value possible and
01124   // red normalized by max value observed. This normalizes the output
01125   // but gives us an idea of scale. Scale is higher if output is more white.
01126   Image<PixRGB<float> > RGoutput;
01127   RGoutput.resize(CONTsetImageSizeX,CONTsetImageSizeY);
01128   for(INT i = 0; i < CONTsetImageSizeX; i++) //This Neuron's position X
01129   {
01130     for(INT j = 0; j < CONTsetImageSizeY; j++) //This Neuron's position Y
01131     {
01132       const FLOAT val1 = (output.getVal(i,j)/
01133                           (CONTmaxGroupSupress*CONTorientations)) * 255.0F;
01134       const FLOAT val2 = (output.getVal(i,j)/maxVal) * 255.0F;
01135       const PixRGB<float> pix(val2,val1,val1);
01136       RGoutput.setVal(i,j,pix);
01137     }
01138   }
01139 
01140   RGoutput = rescale(RGoutput,CONTsetImageSizeX*4,CONTsetImageSizeY*4);
01141 
01142   Raster::VisuRGB(RGoutput, sformat("groupSup.%d.%06d.out.ppm",
01143                                     CONTsetImageSizeX,CONTcurrentFrame));
01144 }
01145 
01146 // ######################################################################
01147 CONTOUR_RUN2_DEC
01148 INT CONTOUR_RUN2_CLASS::CONTgetCurrentIter()
01149 {
01150   return CONTstoreCurrentIter;
01151 }
01152 
01153 #undef CONTOUR_RUN2_DEC
01154 #undef CONTOUR_RUN2_CLASS
01155 
01156 
01157 // explicit instantiations:
01158 #define CR2INST contourRun2<(unsigned short)12, (unsigned short)3, \
01159     (unsigned short)4, (unsigned short)3, float, int>
01160 
01161 template class CR2INST;
01162 template <> const float CR2INST::CONTmaxFastPlasticity     = 5.0F;
01163 template <> const float CR2INST::CONTminFastPlasticity     = 1.0F;
01164 template <> const float CR2INST::CONTmaxGroupSupress       = 10.0F;
01165 template <> const float CR2INST::CONTminGroupSupress       = 1.0F;
01166 template <> const float CR2INST::CONTsmallNumber           = 0.001F;
01167 
01168 
01169 
01170 // ######################################################################
01171 /* So things look consistent in everyone's emacs... */
01172 /* Local Variables: */
01173 /* indent-tabs-mode: nil */
01174 /* End: */