SurpriseMap.C

/*!@file Surprise/SurpriseMap.C a surprise map */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2003   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Laurent Itti <itti@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Surprise/SurpriseMap.C $
// $Id: SurpriseMap.C 11562 2009-08-08 00:35:40Z dberg $
//

#include "Surprise/SurpriseMap.H"

#include "Image/Kernels.H"  // for gaussianBlob()
#include "Image/MathOps.H"
#include "Util/Assert.H"

// ######################################################################
template <class T>
SurpriseMap<T>::SurpriseMap() :
  itsModels(), itsQlen(0), itsInitialModel(),
  itsNeighSigma(0.0f), itsLocSigma(0.0f), itsNweights(), itsNWmin(0.0f),
  itsNeighUpdFac(0.7), itsProbe(-1, -1), itsSLfac(1.0), itsSSfac(0.1),
  itsJointKLBiasType(SU_KL_NONE),
  itsTakeSTMax(false)
{ }

// ######################################################################
template <class T>
void SurpriseMap<T>::init(const uint qlen, const double updatefac,
                          const double neighupdatefac,
                          const double sampleval, const double samplevar,
                          const float neighsigma, const float locsigma,
                          const Point2D<int>& probe, const double slfac,
                          const double ssfac, const SU_KL_BIAS klbias,
                          const bool takeSTMax)
{
  itsModels.clear();
  itsNweights.freeMem();
  itsInitialModel.init(updatefac, sampleval, samplevar);

  itsQlen            = qlen;
  itsNeighSigma      = neighsigma;
  itsLocSigma        = locsigma;
  itsNeighUpdFac     = neighupdatefac;
  itsProbe           = probe;
  itsSLfac           = slfac;
  itsSSfac           = ssfac;
  itsJointKLBiasType = klbias;
  itsTakeSTMax       = takeSTMax;
}
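
// Illustrative usage sketch: a SurpriseMap is typically initialized once
// and then fed one SurpriseImage per input frame. The parameter values
// below are hypothetical placeholders, not toolkit defaults:
//
//   SurpriseMap<SurpriseModelSP> smap;
//   smap.init(5,                    // qlen: number of chained models
//             0.7, 0.7,             // update factors (local, neighborhood)
//             0.0, 5.0,             // initial sample value and variance
//             0.5f, 3.0f,           // neighborhood and local sigmas
//             Point2D<int>(-1, -1), // probe disabled
//             1.0, 0.1,             // temporal (SL) and spatial (SS) weights
//             SU_KL_NONE, false);   // KL bias type, take-ST-max flag
//
//   // then, for each frame converted to a SurpriseImage 'sample':
//   //   Image<double> s = smap.surprise(sample);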

// ######################################################################
template <class T>
SurpriseMap<T>::~SurpriseMap()
{ }

// ######################################################################
template <class T>
void SurpriseMap<T>::reset()
{
  for (uint i = 0; i < itsModels.size(); i ++)
    itsModels[i].reset();
}

// ######################################################################
template <class T>
void SurpriseMap<T>::initModels(const SurpriseImage<T>& sample,
                                const bool setBias)
{
  // resize and reset our queue of models:
  SurpriseImage<T> models(sample.getDims()); // NOTE: uninitialized models

  if(setBias)
    itsInitialModel.setBias(itsSLfac,itsSSfac,itsJointKLBiasType);

  models.clear(itsInitialModel);
  models.reset();

  itsModels.clear();

  for (uint i = 0; i < itsQlen; i ++) itsModels.push_back(models);

  // compute our Difference-of-Gaussians mask of weights:
  const int   w     = sample.getWidth();
  const int   h     = sample.getHeight();
  const float sigma = itsNeighSigma * float(std::max(w, h));
  const Dims          d(w * 2 + 1, h * 2 + 1);
  const Point2D<int>  p(w, h);

  itsNweights  = gaussianBlob<float>(d, p, sigma, sigma);
  itsNweights -= gaussianBlob<float>(d, p, itsLocSigma, itsLocSigma) *
    (itsLocSigma*itsLocSigma / (sigma * sigma) * 1.5f);

  inplaceRectify(itsNweights);  // eliminate negative values

  float mi, ma;
  getMinMax(itsNweights, mi, ma);
  itsNWmin = 0.01f * ma;

  // zero out weights below the threshold:
  for (Image<float>::iterator itr = itsNweights.beginw();
       itr != itsNweights.endw(); ++itr)
    if (*itr <= itsNWmin) *itr = 0.0f;

}
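
// The neighborhood weight mask built above is a Difference-of-Gaussians:
// a broad Gaussian of width sigma = itsNeighSigma * max(w, h), centered on
// the map, minus a narrow Gaussian of width itsLocSigma scaled by
// 1.5 * itsLocSigma^2 / sigma^2, i.e. (roughly, up to gaussianBlob's
// normalization)
//
//   W(x, y) = G_sigma(x, y) - 1.5 * (locSigma^2 / sigma^2) * G_locSigma(x, y)
//
// The subtraction suppresses the mask near the center so that a location
// weighs its surround more than itself; negative values are rectified away
// and weights below 1% of the maximum are zeroed.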

// ######################################################################
template <>
Image<double> SurpriseMap<SurpriseModelPM>::
surprise(const SurpriseImage<SurpriseModelPM>& sample)
{
  // the generic version for other models is implemented further below

  // is it the first time we are called? if so, we need to set up our
  // size and reset our neighborhood cache:
  if (itsModels.empty())
    initModels(sample);
  else if (itsModels[0].isSameSize(sample) == false)
    LFATAL("Inconsistent input size!");

  // each model feeds into the next one. The first (fastest) model
  // receives the sample from the image as input, and is updated. The
  // updated model then serves as input to the next slower model, and
  // so on. Total surprise is the product from all models:
  SurpriseImage<SurpriseModelPM> input(sample);
  Image<double> s;
  const bool doprobe = input.coordsOk(itsProbe);
  double locmean = 0.0, locvar = 0.0, neimean = 0.0, neivar = 0.0; // for probe

  for (uint i = 0; i < itsModels.size(); i ++)
    {
      itsModels[i].resetUpdFac(0.85F);
      // For covariant models we may need to analyze the covariance factors
      // and update the hyperparameters before we compute surprise as usual.
      itsModels[i].preComputeHyperParams(input);
      // Here the model is covaried with itself, so the spatial and temporal
      // components are computed in a single step.
      itsModels[i].neighborhoods(itsModels[i], itsNweights, true);

      // show values at a probe location?
      if (doprobe)
        {
          locmean = itsModels[i].getMean().getVal(itsProbe);
          locvar  = itsModels[i].getVar().getVal(itsProbe);
        }

      // update local models and compute the local temporal surprise:
      const Image<double> sl = itsModels[i].surprise(input);

      // the total surprise here is the weighted local temporal surprise
      // (the spatial component was already folded into the model above):
      Image<double> stot(sl.getDims(), ZEROS);
      if (itsSLfac)
        {
          if (itsSLfac != 1.0) stot = sl * itsSLfac; else stot = sl;
        }

      // total surprise combines multiplicatively across models with
      // different time scales:
      if (i == 0) s = stot; else s *= stot;

      // save debug output from a probe location:
      if (doprobe)
        {
          LERROR("MODELS: %d   %g %g   %g %g   %g %g", i,
                 input.getMean().getVal(itsProbe),
                 input.getVar().getVal(itsProbe),
                 locmean, locvar, neimean, neivar);
          LERROR("SURPRISE: %d %g", i, sl.getVal(itsProbe));
        }

      // the updated models are the input to the next iteration:
      input = itsModels[i];
    }

  // the results here should always be positive but sometimes turn
  // negative due to rounding errors in the surprise computation.
  // Let's clamp just to be sure, otherwise we'll end up with some
  // NaNs at later stages:
  inplaceRectify(s);

  // calm down total surprise and preserve units of wows:
  // we also rescale (x5) so it combines better with the other surprise models
  s = toPower(s, 1.0 / (3.0 * double(itsModels.size()))) * 5;

  // return total surprise:
  return s;

}
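
// Note: in this SurpriseModelPM specialization the spatial (neighborhood)
// component is folded into each model via preComputeHyperParams() and
// neighborhoods() above, so only the local surprise term enters the product;
// the result is also rescaled (x5) relative to the generic version below.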

// ######################################################################
template <>
Image<double> SurpriseMap<SurpriseModelGG>::
surprise(const SurpriseImage<SurpriseModelGG>& sample)
{
  // is it the first time we are called? if so, we need to set up our
  // size and reset our neighborhood cache:
  if (itsModels.empty())
    initModels(sample,true);
  else if (itsModels[0].isSameSize(sample) == false)
    LFATAL("Inconsistent input size!");

  // each model feeds into the next one. The first (fastest) model
  // receives the sample from the image as input, and is updated. The
  // updated model then serves as input to the next slower model, and
  // so on. Total surprise is the product from all models:
  SurpriseImage<SurpriseModelGG> input(sample);
  Image<double> s;
  const bool doprobe = input.coordsOk(itsProbe);
  double locmean = 0.0, locvar = 0.0, neimean = 0.0, neivar = 0.0; // for probe

  for (uint i = 0; i < itsModels.size(); i ++)
    {
      // compute neighborhood models from our current (old) local models:
      itsModels[i].neighborhoods(input, itsNweights);
      //itsModels[i].neighborhoods(input, itsNweights, itsNWmin, true);
      //itsModels[i].neighborhoods(input,20);

      // show values at a probe location?
      if (doprobe)
        {
          locmean = itsModels[i].getMean().getVal(itsProbe);
          locvar  = itsModels[i].getVar().getVal(itsProbe);
        }

      // update local models and compute the local temporal surprise:
      const Image<double> sl = itsModels[i].surprise(input);

      // the total surprise here is the weighted local temporal surprise
      // (the spatial component was handled by the neighborhoods() call above):
      Image<double> stot(sl.getDims(), ZEROS);
      if (itsSLfac)
        {
          if (itsSLfac != 1.0) stot = sl * itsSLfac; else stot = sl;
        }

      // total surprise combines multiplicatively across models with
      // different time scales:
      if (i == 0) s = stot; else s *= stot;

      // save debug output from a probe location:
      if (doprobe)
        {
          LERROR("MODELS: %d   %g %g   %g %g   %g %g", i,
                 input.getMean().getVal(itsProbe),
                 input.getVar().getVal(itsProbe),
                 locmean, locvar, neimean, neivar);
          LERROR("SURPRISE: %d %g", i, sl.getVal(itsProbe));
        }

      // the updated models are the input to the next iteration:
      input = itsModels[i];
    }

  // the results here should always be positive but sometimes turn
  // negative due to rounding errors in the surprise computation.
  // Let's clamp just to be sure, otherwise we'll end up with some
  // NaNs at later stages:
  inplaceRectify(s);

  // calm down total surprise and preserve units of wows:
  s = toPower(s, 1.0 / (3.0 * double(itsModels.size())));
  s = logSig(s,1,1);
  //double min, max;
  //getMinMax(s,min,max);
  //LINFO("S max %f min %f",min,max);

  // return total surprise:
  return s;
}

// ######################################################################
template <class T>
Image<double> SurpriseMap<T>::surprise(const SurpriseImage<T>& sample)
{
  // is it the first time we are called? if so, we need to set up our
  // size and reset our neighborhood cache:
  if (itsModels.empty())
    initModels(sample);
  else if (itsModels[0].isSameSize(sample) == false)
    LFATAL("Inconsistent input size!");

  // each model feeds into the next one. The first (fastest) model
  // receives the sample from the image as input, and is updated. The
  // updated model then serves as input to the next slower model, and
  // so on. Total surprise is the product from all models:
  SurpriseImage<T> input(sample);
  Image<double> s;
  const bool doprobe = input.coordsOk(itsProbe);
  double locmean = 0.0, locvar = 0.0, neimean = 0.0, neivar = 0.0; // for probe

  // combine space and time by taking their max, or combine them as a product?
  if(itsTakeSTMax)
    {
      Image<double> t;
      for (uint i = 0; i < itsModels.size(); i ++)
        {
          // compute neighborhood models from our current (old) local models:
          SurpriseImage<T> neigh;
          neigh.neighborhoods(input, itsNweights);
          if (itsNeighUpdFac != 0.0) // use different update fac for neighs?
            neigh.resetUpdFac(itsNeighUpdFac); // higher fac -> stronger popout

          // show values at a probe location?
          if (doprobe)
            {
              locmean = itsModels[i].getMean().getVal(itsProbe);
              locvar  = itsModels[i].getVar().getVal(itsProbe);
              neimean = neigh.getMean().getVal(itsProbe);
              neivar  = neigh.getVar().getVal(itsProbe);
            }

          // update local models and compute the local temporal surprise:
          const Image<double> sl = itsModels[i].surprise(input) * itsSLfac;
          // compute spatial surprise:
          const Image<double> ss = neigh.surprise(input)        * itsSSfac;

          // compute the products over space and over time separately
          if (i == 0) { s  = ss; t  = sl; }
          else        { s *= ss; t *= sl; }

          // save debug output from a probe location:
          if (doprobe)
            {
              LERROR("MODELS: %d   %g %g   %g %g   %g %g", i,
                     input.getMean().getVal(itsProbe),
                     input.getVar().getVal(itsProbe),
                     locmean, locvar, neimean, neivar);
              LERROR("SURPRISE: %d %g %g", i, sl.getVal(itsProbe),
                     ss.getVal(itsProbe));
            }

          // the updated models are the input to the next iteration:
          input = itsModels[i];
        }
      // take the max of the spatial and temporal surprise products
      s = takeMax(s,t);
    }
  else
    {
      for (uint i = 0; i < itsModels.size(); i ++)
        {
          // compute neighborhood models from our current (old) local models:
          SurpriseImage<T> neigh;
          neigh.neighborhoods(input, itsNweights);
          if (itsNeighUpdFac != 0.0) // use different update fac for neighs?
            neigh.resetUpdFac(itsNeighUpdFac); // higher fac -> stronger popout

          // show values at a probe location?
          if (doprobe)
            {
              locmean = itsModels[i].getMean().getVal(itsProbe);
              locvar  = itsModels[i].getVar().getVal(itsProbe);
              neimean = neigh.getMean().getVal(itsProbe);
              neivar  = neigh.getVar().getVal(itsProbe);
            }

          // update local models and compute the local temporal surprise:
          const Image<double> sl = itsModels[i].surprise(input);
          // compute spatial surprise:
          const Image<double> ss = neigh.surprise(input);

          // the total surprise is a weighted sum of local temporal
          // surprise and spatial surprise:
          Image<double> stot(sl.getDims(), ZEROS);
          if (itsSLfac)
            {
              if (itsSLfac != 1.0) stot = sl * itsSLfac; else stot = sl;
            }
          if (itsSSfac)
            {
              if (itsSSfac != 1.0) stot += ss * itsSSfac; else stot += ss;
            }

          if (i == 0) s  = stot;
          else        s *= stot;

          // save debug output from a probe location:
          if (doprobe)
            {
              LERROR("MODELS: %d   %g %g   %g %g   %g %g", i,
                     input.getMean().getVal(itsProbe),
                     input.getVar().getVal(itsProbe),
                     locmean, locvar, neimean, neivar);
              LERROR("SURPRISE: %d %g %g", i, sl.getVal(itsProbe),
                     ss.getVal(itsProbe));
            }

          // the updated models are the input to the next iteration:
          input = itsModels[i];
        }
    }

  // the results here should always be positive but sometimes turn
  // negative due to rounding errors in the surprise computation.
  // Let's clamp just to be sure, otherwise we'll end up with some
  // NaNs at later stages:
  inplaceRectify(s);

  // calm down total surprise and preserve units of wows:
  s = toPower(s, 1.0 / (3.0 * double(itsModels.size())));

  // return total surprise:
  return s;
}
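
// In summary, for the generic version above with N chained models, local
// temporal surprise SL_i and spatial (neighborhood) surprise SS_i at scale i,
// the returned map is approximately
//
//   S = max(0, prod_i (slfac * SL_i + ssfac * SS_i)) ^ (1 / (3 * N))
//
// or, when itsTakeSTMax is set,
//
//   S = max( prod_i (ssfac * SS_i), prod_i (slfac * SL_i) ) ^ (1 / (3 * N))
//
// with the rectification applied before the power in both cases.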

// ######################################################################
template <class T>
const SurpriseImage<T>& SurpriseMap<T>::getSurpriseImage(const uint index) const
{
  ASSERT(index < itsModels.size());
  return itsModels[index];
}


// ######################################################################
// explicit instantiations:
template class SurpriseMap<SurpriseModelSG>;
template class SurpriseMap<SurpriseModelSP>;
template class SurpriseMap<SurpriseModelSP1>;
template class SurpriseMap<SurpriseModelSPC>;
template class SurpriseMap<SurpriseModelSPF>;
template class SurpriseMap<SurpriseModelCS>;
template class SurpriseMap<SurpriseModelGG>;
template class SurpriseMap<SurpriseModelPM>;
template class SurpriseMap<SurpriseModelOD>;

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */