contourRun.C

Go to the documentation of this file.
00001 /*!@file CINNIC/contourRun.C CINNIC classes - src3 */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00005 // University of Southern California (USC) and the iLab at USC.         //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: T Nathan Mundhenk <mundhenk@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/CINNIC/contourRun.C $
00035 // $Id: contourRun.C 6191 2006-02-01 23:56:12Z rjpeters $
00036 //
00037 
00038 // ############################################################
00039 // ############################################################
00040 // ##### ---CINNIC---
00041 // ##### Contour Integration:
00042 // ##### T. Nathan Mundhenk nathan@mundhenk.com
00043 // ############################################################
00044 // ############################################################
00045 
00046 #include "CINNIC/contourRun.H"
00047 
00048 #include "Util/Assert.H"
00049 #include "Image/ColorOps.H"
00050 #include "Image/ShapeOps.H"
00051 #include "Util/log.H"
00052 
00053 #include <cmath>
00054 #include <cstdlib>
00055 #include <fstream>
00056 
00057 static float Resistance;  // pulled out of contourRun.H to eliminate warning
00058 
00059 
00060 //#################################################################
00061 contourRun::contourRun()
00062 {
00063 }
00064 
00065 //#################################################################
contourRun::~contourRun()
{
  // NOTE(review): storeVal (allocated with new[] in setArraySize) is not
  // delete[]'d here or anywhere else in this file -- looks like a leak
  // unless it is released by an owner elsewhere; confirm.
}
00069 
00070 //#################################################################
00071 Image<float> contourRun::getSMI(int iter)
00072 {
00073   return SMI[iter];
00074 }
00075 
00076 //#################################################################
void contourRun::copyCombinedSalMap(std::vector< Image<float> > &CSM)
{
  //! Replace our stored combined saliency maps with the caller's set.
  // Despite the name, this copies INTO this object (vector assignment).
  combinedSalMap = CSM;
}
00081 
00082 //#################################################################
void contourRun::setConfig(readConfig &config)
{
  //! Load all simulation tuning parameters from the parsed config file.
  // Values read as float ("...F") or C-string ("...C"); integral members
  // are truncated from the float reader.
  iterations = (int)config.getItemValueF("iterations");
  timestep = config.getItemValueF("timestep");
  maxEnergy = config.getItemValueF("maxEnergy");
  BaseThreshold = config.getItemValueF("BaseThreshold");
  Resistance = config.getItemValueF("Resistance");
  imageSaveTo = config.getItemValueC("imageOutDir");
  logSaveTo = config.getItemValueC("logOutDir");
  dumpSwitchPos = (int)config.getItemValueF("dumpSwitchPos");
  upperLimit = config.getItemValueF("upperLimit");
  // NOTE(review): 'saveto' duplicates imageSaveTo (both read "imageOutDir").
  saveto = config.getItemValueC("imageOutDir");
  iTrans = config.getItemValueF("overlayDumpValue");
  GroupBottom = config.getItemValueF("GroupBottom");
  supressionAdd = config.getItemValueF("supressionAdd");
  supressionSub = config.getItemValueF("supressionSub");
  cascadeType = (int)config.getItemValueF("cascadeType");
  adaptType = (int)config.getItemValueF("adaptType");
  adaptNeuronThresh = config.getItemValueF("adaptNeuronThresh");
  adaptNeuronMax = config.getItemValueF("adaptNeuronMax");
  excMult =  config.getItemValueF("excMult");
  leak = config.getItemValueF("leak");
  orThresh = config.getItemValueF("orThresh");
  initialGroupVal = config.getItemValueF("initialGroupVal");
  fastPlast = config.getItemValueF("fastPlast");
  doFastPlast = (int)config.getItemValueF("doFastPlast");
  lastIterOnly = (int)config.getItemValueF("lastIterOnly");
  doTableOnly = (int)config.getItemValueF("doTableOnly");
  passThroughGain = config.getItemValueF("passThroughGain");
  passThroughTaper = config.getItemValueF("passThroughTaper");
}
00114 //#################################################################
void contourRun::setArraySize(long size)
{
  //! Allocate the scratch buffer used by convolveSimple() to cache
  //! first-iteration interaction terms (indexed by iterCounter).
  // NOTE(review): a previous allocation is not delete[]'d first, so
  // calling this twice leaks the earlier buffer -- confirm call pattern.
  storeVal = new float[size];
}
00119 
00120 //#################################################################
00121 float contourRun::sigmoid(float beta, float v)
00122 {
00123   return (1.0f / (1.0f + pow(2.71828f, (-2.0f * (beta * v)))));
00124 }
00125 //#################################################################
00126 float contourRun::sigmoid2(float beta, float v)
00127 {
00128   return(1.0f / (1.0f + pow(2.71828f, (-1.0f * (beta+v) ))));
00129 }
00130 //#################################################################
00131 float contourRun::preSigmoid(float v, float thresh, float beta)
00132 {
00133   if(v >= thresh) //optimization (?)
00134   {
00135     return (thresh-1);
00136   }
00137   else
00138   {
00139     float sig = sigmoid(beta,(((v/thresh)*(2*(1/beta))-(1/beta))));
00140     if((sig*thresh) > thresh){LINFO("FOO %f",(sig*thresh));}
00141     return (sig*thresh);
00142   }
00143 }
00144 //#################################################################
void contourRun::dumpEnergySigmoid(const char* filename, const char* savefile,
                                   readConfig &config, Image<float> image,
                                   int scaleNo, int scaleTot)
{
  //! Write the per-iteration saliency ("potential") and overlay images to
  //! disk, plus a small "CINNIC.prop" properties file describing the dump.
  // NOTE(review): 'filename' and 'config' are accepted but never used here.
  ASSERT(scaleNo >= 0);ASSERT(scaleTot > 0);
  Image< byte > Tyte;
  Image<PixRGB<byte> > TPyte;
  Image<float> SMIr;
  // Emit the property file consumed by downstream viewing tools.
  std::ofstream propFile("CINNIC.prop",std::ios::out);
  propFile << "scale " << scaleTot << "\n"
           << "iterations " << iterations << "\n"
           << "sizeX " << image.getWidth() << "\n"
           << "sizeY " << image.getHeight() << "\n";
  int sx = image.getWidth(); int sy = image.getHeight();
  propFile << "type potential\n"; //output to propfile
  propFile << "type cascade\n"; //output to propfile
  propFile << "type overlay\n"; //output to propfile
  propFile.close();
  for(int i = 0; i < iterations; i++)
  {
    // Shift SMI[i] down-right by one pixel (border column/row zeroed);
    // this mirrors the (i-1,j-1) offset applied when SMI was filled.
    SMIr.resize(SMI[i].getWidth(),SMI[i].getHeight());
    for(int x = 0; x < SMI[i].getWidth(); x++)
    {
      for(int y = 0; y < SMI[i].getHeight(); y++)
      {
        if((x == 0) || (y == 0))
        {
          SMIr.setVal(x,y,0);
        }
        else
        {
          SMIr.setVal(x,y,SMI[i].getVal(x-1,y-1));
        }
      }
    }

    // Scale the map back up to the source image size for output.
    SMIr = rescale(SMIr, sx,sy);
    Tyte = SMIr;
    int xx = Tyte.getWidth();
    int yy = Tyte.getHeight();
    // do not write images other than the last one if value is set
    if(((lastIterOnly == 0) || (i == (iterations - 1))) && (doTableOnly == 0))
    {
      Raster::WriteRGB(Image<PixRGB<byte> >(Tyte),
                       sformat("%s%s.potential.out.%d.%d.%d.%d.ppm"
                               ,saveto,savefile,scaleNo,i,xx,yy));
      // NOTE(review): this first TPyte assignment is immediately overwritten
      // by the overlay below, so the cascade image itself is never written.
      TPyte = cascadeMap[i];
      overlayS = overlayStain(SMIr,image,iTrans,'r');
      TPyte = overlayS;
      Raster::WriteRGB(TPyte,
                       sformat("%s%s.overlay.out.%d.%d.%d.%d.ppm"
                               ,saveto,savefile,scaleNo,i,xx,yy));
    }
    LINFO("CRAP"); // leftover debug trace, emitted once per iteration
  }
}
00201 //#################################################################
00202 void contourRun::setImageSize(int X, int Y)
00203 {
00204   ASSERT((X > 0) && (Y > 0));
00205   setImageSizeY = Y;
00206   setImageSizeX = X;
00207 }
00208 //#################################################################
void contourRun::setIterations(int iter)
{
  //! Set how many simulation iterations will be run; must be positive.
  ASSERT(iter > 0);
  iterations = iter;
}
00214 //#################################################################
/*! find the max energy per time slice, timestep is 1/x seconds where
  x = timestep
 */
void contourRun::deriveEnergy()
{
  // Energy injected per simulated time slice. Both values come from the
  // config file via setConfig(); timestep must be non-zero.
  energy = maxEnergy/timestep;
}
00222 //#################################################################
00223 /*! this method will reset the neural matrix. Must be called before run image
00224  */
00225 void contourRun::resetMatrix()
00226 {
00227   int it = (int)iterations+2;
00228   mat1.resize(setImageSizeY,prop);
00229   mat2.resize(setImageSizeX,mat1);
00230   mat3.resize(AnglesUsed,mat2);
00231   NeuronMatrix.resize(it,mat3);
00232   //LINFO("NeuronMatrix vectors set");
00233   float foo = BaseThreshold;
00234   for(int n = 0; n < (int)(iterations+2.0F); n++)
00235   {
00236     for(int i = 0; i < AnglesUsed; i++)
00237     {
00238       for(int j = 0; j < setImageSizeX; j++)
00239       {
00240         for(int k = 0; k < setImageSizeY; k++)
00241         {
00242           NeuronMatrix[n][i][j][k].ResetTempCharge();
00243           NeuronMatrix[n][i][j][k].ResetCharge();
00244           NeuronMatrix[n][i][j][k].setThreshold(foo,Resistance);
00245         }
00246       }
00247     }
00248   }
00249   //LINFO("reset done");
00250 }
00251 
00252 //#################################################################
void contourRun::preImage(std::vector< Image<float> > &imageMap,
                          ContourNeuronCreate<float> &N)
{
  //! One-time per-image setup: size all working buffers and precompute the
  //! energy-scaled orientation images (imageOpt[a] = imageMap[a] * energy).
  // NOTE(review): 'N' is accepted but not referenced in this body.
  iterCounter = 0;
  cascadeChunk = 100;
  deriveEnergy(); // sets 'energy' = maxEnergy/timestep, used below
  SM.resize(setImageSizeX,setImageSizeY,true);
  SMI.resize((int)iterations,SM);
  cascadeMap.resize((int)iterations,
                    Image<PixRGB<float> >(setImageSizeX, setImageSizeY, ZEROS));
  GroupMap.resize((int)iterations,SM);
  cascade.resize(cascadeChunk);
  cascadeSize.resize(setImageSizeX,setImageSizeY);
  ICH.resize(setImageSizeX,setImageSizeY,true);
  cascadeImage.resize(cascadeChunk,ICH);
  imageOpt.resize(AnglesUsed,imageMap[1]);

  //move through the entire image  ImageMap ImageOpt;
  // Pre-scale each orientation channel by the per-timestep energy so the
  // inner convolution loop does not have to multiply by it every time.
  for(int a = 0; a < AnglesUsed; a++)
  {
    imageOpt[a].resize(imageMap[a].getWidth(),imageMap[a].getHeight(),true);
    for(int i = 0; i < setImageSizeX; i++)
    {
      for(int j = 0; j < setImageSizeY; j++)
      {
        imageOpt[a].setVal(i,j,(imageMap[a].getVal(i,j)*energy));
      }
    }
  }
#if 0
  /* commented this loop out because

     (1) it triggers a bug in g++ 3.4.1, but

     (2) this loop should be unneccessary in any case because in the above
         line:

         cascadeImage.resize(cascadeChunk,ICH);

         cascadeImage gets set to have 'cascadeChunk' number of copies of
         ICH, and ICH was already resize'd to
         (setImageSizeX,setImageSizeY). So... each cascadeImage[i] should
         already have the desired size, so we can skip this loop, and we
         can therefore avoid triggering the bug in g++ 3.4.1!

      2004-08-06 <rjpeters>

        submitted this to gcc's bugzilla database along with a reduced
        testcase... the bug is trackable here:

                 http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16905
   */
  for(int i = 0; i < cascadeChunk; i++)
  {
    cascadeImage[i].resize(setImageSizeX,setImageSizeY);
  }
#endif
}
00311 
/*
   OUTER LOOP / GROUP SUPPRESSION COMPUTATION
   Here we execute the outer portions of the 6-layer loop in CINNIC
   for pseudo-convolution.
   We also compute the group suppression based upon each group's
   overall change in total activity (derivative).
*/
00319 //#################################################################
void contourRun::calcGroups(std::vector< Image<float> > &imageMap,
                                 ContourNeuronCreate<float> &N, int iter)
{
  //! Integrate column charges into the saliency map for this iteration and
  //! update per-group suppression from each group's activity delta.
  // NOTE(review): 'imageMap' and 'N' are accepted but not referenced here.
  float hold;
  SMI[iter].resize(setImageSizeX,setImageSizeY,true);
  cascadeMap[iter].resize(setImageSizeX,setImageSizeY,true);
  GroupMap[iter].resize(setImageSizeX,setImageSizeY,true);
  // Reset the per-group activity-change accumulators.
  for(int thing = 0; thing < Groups; thing++)
  {
    GroupHold[thing] = 0;
  }

  //First check potentials for each column at this iteration,
  //reset if necessary
  for(int i = 0; i < setImageSizeX; i++)
  {
    for(int j = 0; j < setImageSizeY; j++)
    {
      for(int a = 0; a < AnglesUsed; a++)
      {
        //Add up all charges in this column
        //if charge is negative then make zero
        if(NeuronMatrix[iter][a][i][j].getCharge() < 0)
        {
          NeuronMatrix[iter][a][i][j].ResetCharge();
        }
        // add this iteration plus all others combined here
        // i.e. here lies the integrator for each pixel!
        // (note: the setVal below executes once per angle pass --
        //  the extra indentation is misleading, it is inside this loop)
        hold = SM.getVal(i,j)+
          NeuronMatrix[iter][a][i][j].getCharge();
          SM.setVal(i,j,hold);

      }

      // leak this neuron (clamped at zero)
      hold = SM.getVal(i,j) - leak;
      if(hold < 0){hold = 0;}
      SM.setVal(i,j,hold);
      // Compute sigmoid of column this iteration
      // (preSigmoid's 'beta' argument presumably takes a default declared
      //  in the header -- confirm)
      hold = preSigmoid(SM.getVal(i,j),upperLimit);
      //hold = sigmoid2(upperLimit,SM.getVal(i,j));
      // set output (for movie) to a value from 0 to 256
      GroupMap[iter].setVal(i,j,hold);
      // set value into this iteration normalized
      if((i > 0) && (j > 0))
      {
        SMI[iter].setVal((i-1),(j-1),((hold/upperLimit)*255));
      }
      // calculate group DELTA if group adaptation selected
      if((iter > 0) && (adaptType == 1))
      {
        int ghold = (int)Group.getVal(i,j); //what group is this column in?
        int ihold = iter - 1; //what was the last iteration
        // find delta value for sig. on column, add to group
        GroupHold[ghold] += GroupMap[iter].getVal(i,j) -
          GroupMap[ihold].getVal(i,j);
      }
    }
  }

  // modify suppression values using groups
  if(adaptType == 1)
  {
    for(int g = 0; g < Groups; g++)
    {
      // if values are too big then add suppression
      // this is done per pixel group
      //LINFO("Group %d DELTA %f \tSUPRESSION %f",g,GroupHold[g],GroupMod[g]);
      if(GroupHold[g] > GroupTop)
      {
        GroupMod[g] += supressionAdd*(GroupHold[g]-GroupTop);
        GroupMod2[g] = 1/GroupMod[g];
        //LINFO("NEW Group Supression %d:%f",g,GroupMod[g]);
      }
      // if values are too small then remove suppression
      if(GroupHold[g] < GroupBottom)
      {
        GroupMod[g] -= supressionSub;
      }
    }
  }
}
00402 
00403 //#################################################################
void contourRun::runImageSigmoid(std::vector< Image<float> > &imageMap,
                                 ContourNeuronCreate<float> &N, int iter)
{
  //! Run one full simulation step for iteration 'iter': first integrate
  //! column charges and update group suppression (calcGroups), then
  //! propagate charges into iteration iter+1 (iterateConvolve).
  calcGroups(imageMap,N,iter);
  iterateConvolve(iter,imageMap,N);
}
00410 
00411 
00412 //#################################################################
00413 void contourRun::iterateConvolve(int iter,std::vector< Image<float> > &imageMap,
00414                                  ContourNeuronCreate<float> &N,
00415                                  const int node)
00416 {
00417   //RUN hypercolumn get charges for i put charges for i+1
00418   for(int a = 0; a < AnglesUsed; a++) //this neuron's angle
00419   {
00420     for(int b = 0; b < AnglesUsed; b++) //other neuron's angle
00421     {
00422       convolveSimple(iter,imageMap,N,a,b);
00423     }
00424   }
00425 }
00426 
/*

---------Pseudo-Convolution Core for CINNIC. Current version-----------

This is the meat of CINNIC. Here the hypercolumn is looped over
   for all neurons and interactions. While the main loop is only
   6 nested levels deep, multiple optimizations are added to test
   whether a neuron should be used; for instance, if it is zero, skip it.

*/
00437 
00438 //#################################################################
void contourRun::convolveSimple(int iter,std::vector< Image<float> > &imageMap,
                                ContourNeuronCreate<float> &N,
                                const int a, const int b, const int node)
{
  //! Pseudo-convolution core: for every active neuron of angle 'a' at
  //! iteration 'iter', propagate charge onto neighboring neurons of angle
  //! 'b' at iteration iter+1, weighted by the 4-D neural interaction map.
  // NOTE(review): 'node' is accepted but never used in this body.
  for(int i = 0; i < setImageSizeX; i++) //This Neuron's position X
  {
    for(int j = 0; j < setImageSizeY; j++) //This Neuron's position Y
    {
      if(imageMap[a].getVal(i,j) > 0.001F) //optimization
      {

        //MUST KILL OTHER NEURONS!!!! - fast plasticity
        //sigmoid this at some point ?
        // Plasticity gain from this column's current charge, clamped to [1,5].
        float mod;
        mod = NeuronMatrix[iter][a][i][j].getCharge()*fastPlast;
        if(mod < 1){mod = 1;}
        if(mod > 5){mod = 5;}
        int thisGroup = (int)Group.getVal(i,j);
        // Pass-through term: inject the (group-suppressed) input directly
        // into this neuron's next-iteration charge.
        float crap = imageMap[a].getVal(i,j)*
          (passThroughGain/((GroupMod[thisGroup]*5)-4));
        NeuronMatrix[(iter+1)][a][i][j].
          ChargeSimple(crap);
        for(int k = 0; k <= XSize; k++) //Other Neuron's position X
        {
          //Insp2X = (InspX-i) + (int)XCenter;
          //Current position plus its field - center
          InspX = i + (k-(int)XCenter);
          if(InspX >= 0)
          {
            if(InspX < setImageSizeX)
            {
              for(int l = 0; l <= YSize; l++) //Other Neuron's position Y
              {
                InspY = j - (l-(int)YCenter);
                //stay inside of image
                if(InspY >= 0)
                {
                  if(InspY < setImageSizeY)
                  {
                    //check that this element != 0
                    if(N.FourDNeuralMap[a][b][k][l].zero)
                    {
                      //LINFO("%d,%d",InspX,InspY);
                      //optimization
                      if(imageMap[b].getVal(InspX,InspY) > orThresh )
                      {
                        //Insp2Y = (InspY-j) + (int)YCenter; // <-- FIX?
                        float hold;
                        //this is done this way to optimize the code
                        //for multiple
                        //iteration, by storing some of the first iterations
                        //results in an array
                        if(iter == 0)
                        {
                          //## it is important to note that ImageOpt
                          //= Image*energy, thus it's an optimization
                          //if this is a negative number ,
                          //then suppression may be modified

                          //FORMULA HERE
                          //orient filtered image1 * orient filtered image2
                          //* their excitation
                          // NOTE(review): iterCounter must stay in sync
                          // across iterations for this cache to be valid.
                          storeVal[iterCounter] = ((imageOpt[a].getVal(i,j) *
                             imageMap[b].getVal(InspX,InspY)) *
                             N.FourDNeuralMap[a][b][k][l].angABD);
                        }
                        if(N.FourDNeuralMap[a][b][k][l].angABD < 0)
                        {
                          // inhibitory link: also scale by group suppression
                          hold =
                            GroupMod[thisGroup] * mod * storeVal[iterCounter];
                        }
                        else
                        {
                          hold = mod * storeVal[iterCounter];
                        }
                        //set energy in sal map
                        NeuronMatrix[(iter+1)][b][InspX][InspY].
                          ChargeSimple(hold);
                        iterCounter++;
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
00532 
00533 
00534 //#################################################################
void contourRun::contourRunMain(std::vector< Image<float> > &imageMap,
                                ContourNeuronCreate<float> &N
                                ,readConfig &config, Image<float> &group,
                                int groups,int iter,float groupTop)
{
  //! Top-level entry point: on the first iteration, configure and allocate
  //! everything; then run one simulation step for iteration 'iter'.
  if(iter == 0)
  {
    // One-time setup: read config, size buffers, zero the neuron matrix,
    // and precompute the energy-scaled orientation images.
    GroupTop = groupTop;
    setConfig(config);
    setImageSize(imageMap[1].getWidth(),imageMap[1].getHeight());
    resetMatrix();
    preImage(imageMap,N);
    Group = group;
    // groups can be set to either 1 or 0 depending on whether or not you
    // want supression to happen only when a group reaches threshold
    // excitation or not.
    GroupMod.resize(groups,initialGroupVal);
    GroupMod2.resize(groups,initialGroupVal); //?
    GroupHold.resize(groups,0.0F);
    Groups = groups;
  }
  // Reset the shared storeVal cursor before each convolution pass so the
  // first-iteration cache in convolveSimple stays index-aligned.
  iterCounter = 0;
  runImageSigmoid(imageMap,N,iter);

}
00561 
00562 // ######################################################################
00563 /* So things look consistent in everyone's emacs... */
00564 /* Local Variables: */
00565 /* indent-tabs-mode: nil */
00566 /* End: */
Generated on Sun May 8 08:40:22 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3