WTAwinner.H

/*!@file Neuro/WTAwinner.H a winner-take-all winner (i.e., covert shift of attention) */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2003   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Laurent Itti <itti@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/WTAwinner.H $
// $Id: WTAwinner.H 9412 2008-03-10 23:10:15Z farhan $
//

#ifndef WTAWINNER_H_DEFINED
#define WTAWINNER_H_DEFINED

#include "Image/Point2D.H"
#include "Util/Assert.H"
#include "Util/MathFunctions.H"
#include "Util/SimTime.H"
//! This is an open class representing a WTA winner (covert attention shift)
/*! This class is an open (all members public) container for a
Point2D<int>, time stamp, saliency voltage and possibly other things that
characterize a WTA winner, or covert shift of attention. Like Point2D<int>,
this class is fully inlined, so there is no WTAwinner.C file. A usage
sketch is given just after the class definition below. */

class WTAwinner
{
public:
  // note: no default constructor, use WTAwinner::NONE() if you want a
  // dummy invalid WTAwinner

  static inline WTAwinner NONE();

  //! Constructor given a Point2D<int>, time and other stuff
  inline WTAwinner(const Point2D<int>& pp, const SimTime& tt,
                   const double svv, const bool bor);

  //! Return coordinates scaled down to the scale of the saliency map
  inline Point2D<int> getSMcoords(const int smlevel) const;

  //! Build from coordinates given at the scale of the saliency map
  static inline WTAwinner buildFromSMcoords(const Point2D<int> smcoords,
                                            const int smlevel,
                                            const bool useRandom,
                                            const SimTime& tt,
                                            const double svv,
                                            const bool bor);

  //! Returns true if the coordinates are not (-1, -1)
  inline bool isValid() const;

  // public data members:
  Point2D<int> p;     //!< The spatial coordinates within the full-resolution input
  Point2D<int> smpos; //!< The scaled-down coordinates within the saliency map
  SimTime t;          //!< The time, in seconds
  double sv;          //!< The saliency map voltage at the winning location, in Volts
  bool boring;        //!< True if this shift was made out of boredom
};
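
// Usage sketch (illustrative only, not part of the original toolkit API;
// the coordinates and voltage below are made-up values, and a real caller
// would pass the current simulation time instead of SimTime::ZERO()):
//
//   WTAwinner win(Point2D<int>(320, 240), SimTime::ZERO(), 3.0e-3, false);
//   if (win.isValid() && !win.boring)
//     { /* shift covert attention to win.p; log win.t and win.sv */ }
//
//   WTAwinner none = WTAwinner::NONE();   // dummy, invalid winner
//   ASSERT(!none.isValid());              // NONE() sits at (-1, -1)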

// ######################################################################
// #################### INLINED METHODS:
// ######################################################################
inline WTAwinner WTAwinner::NONE()
{
  return WTAwinner(Point2D<int>(-1,-1), SimTime::ZERO(), 0.0, false);
}

// ######################################################################
inline WTAwinner::WTAwinner(const Point2D<int>& pp, const SimTime& tt,
                            const double svv, const bool bor) :
  p(pp), smpos(-1, -1), t(tt), sv(svv), boring(bor)
{ }

// ######################################################################
inline Point2D<int> WTAwinner::getSMcoords(const int smlevel) const
{
  // the 0.49 offset below is to eliminate possible random jitter
  // introduced by buildFromSMcoords():
  return Point2D<int>(int(p.i / double(1 << smlevel) + 0.49),
                      int(p.j / double(1 << smlevel) + 0.49));
}
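
// Worked example (illustrative, values made up): with smlevel = 4, one
// saliency-map pixel covers a 16x16 block of input pixels, so a winner at
// full-resolution coordinates (320, 240) maps back to
// (int(320/16.0 + 0.49), int(240/16.0 + 0.49)) = (20, 15). Jitter of up to
// +/- 0.49 * 16 pixels added by buildFromSMcoords() is absorbed by the 0.49
// offset before truncation, so the round trip returns the original coords.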

// ######################################################################
inline WTAwinner WTAwinner::buildFromSMcoords(const Point2D<int> smcoords,
                                              const int smlevel,
                                              const bool useRandom,
                                              const SimTime& tt,
                                              const double svv,
                                              const bool bor)
{
  WTAwinner result = WTAwinner::NONE();

  // scale coords up: By construction of our pyramids and of decX(),
  // decY() and friends, the coordinates of a saliency-map pixel
  // represent information around the top-left corner of that pixel,
  // not the center of that pixel. So we simply scale the coordinates
  // up and do not attempt to center them over the extent of the
  // saliency map pixel:
  result.p.i = smcoords.i << smlevel;
  result.p.j = smcoords.j << smlevel;

  // add random jitter:
  if (useRandom)
    {
      int jitter = 1 << smlevel;  // saliency pixel size
      result.p.i +=
        int(jitter * (randomDouble()*0.98 - 0.49));  // up to +/- SMpix*0.49
      result.p.j +=
        int(jitter * (randomDouble()*0.98 - 0.49));
      if (result.p.i < 0) result.p.i = 0;
      if (result.p.j < 0) result.p.j = 0;
    }

  // initialize other members:
  result.smpos = smcoords;
  result.t = tt;
  result.sv = svv;
  result.boring = bor;

  // double-check that round-trip scaling-up/scaling-down gives us
  // back the same saliency map coords that we started with:
  ASSERT(result.getSMcoords(smlevel) == result.smpos);

  return result;
}
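
// Example (illustrative sketch; the peak location, level, voltage and time
// are made-up placeholders, and a real caller would pass the current
// simulation time instead of SimTime::ZERO()): rebuilding a full-resolution
// winner from the maximum of a saliency map computed at pyramid level 4:
//
//   Point2D<int> peak(20, 15);               // argmax of the saliency map
//   WTAwinner win =
//     WTAwinner::buildFromSMcoords(peak, 4, true /* add jitter */,
//                                  SimTime::ZERO(), 2.5e-3, false);
//   // win.p lands near (320, 240), and win.getSMcoords(4) == peak
//   // is guaranteed by the ASSERT above.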

// ######################################################################
inline bool WTAwinner::isValid() const
{ return p.isValid(); }

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */
#endif