TrainingSet.H

/*!@file TIGS/TrainingSet.H Manage a paired set of eye position data and input feature vectors */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Rob Peters <rjpeters at usc dot edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/TIGS/TrainingSet.H $
// $Id: TrainingSet.H 9412 2008-03-10 23:10:15Z farhan $
//

#ifndef TIGS_TRAININGSET_H_DEFINED
#define TIGS_TRAININGSET_H_DEFINED

#include "Component/ModelComponent.H"
#include "Component/ModelParam.H"
#include "Image/Image.H"

#include <vector>

/// Manage a paired set of eye position data and input feature vectors
class TrainingSet : public ModelComponent
{
public:
  TrainingSet(OptionManager& mgr, const std::string& fx_type);

  /// Get the dimensions of the scaled-down input frames
  Dims scaledInputDims() const;

  /// Get the number of entries in each eye-position vector
  size_t numPositions() const;

  /// Convert an (i,j) location into a linear index into the position vector
  int p2p(const int i, const int j) const;

  /// Convert a 2D point into a linear index into the position vector
  int p2p(const Point2D<int>& p) const;

  /// Record one training sample, pairing an eye position with its input feature vector
  Image<float> recordSample(const Point2D<int>& loc,
                            const Image<float>& features);

  /// Load feature and position vectors from a pair of .pfm files (such as those produced by save())
  /** <pfx>-features.pfm should contain the features associated with a
      series of frames, with nrows=NFRAMES and ncols=NFEATURES, where
      NFEATURES is the number of features in the feature vector
      generated by whichever feature extractor produced this training
      set.

      <pfx>-positions.pfm should contain the eye position vectors
      associated with the same series of frames, with nrows=NFRAMES
      and ncols=W*H, where W*H is the number of pixels in each image of
      the frame sequence.
   */
  void load(const std::string& pfx);

  /// Like load(), but rebalance the training set so that eye positions are included equally often
  void loadRebalanced(const std::string& pfx);

  /// Save the accumulated feature and position vectors to a pair of .pfm files (see load() for the file format)
  void save(const std::string& pfx);
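
  /* A minimal usage sketch (illustrative only, not part of the original
     header): the names "mgr", "eyePos", "featureVec", the file prefix
     "mydata", and the feature-extractor tag "myFxType" are hypothetical.

       TrainingSet ts(mgr, "myFxType");
       ts.recordSample(eyePos, featureVec); // once per frame of the sequence
       ts.save("mydata");                   // writes mydata-features.pfm and
                                            //        mydata-positions.pfm
       ts.load("mydata");                   // reloads the pair written by save()
  */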

  /// Get the accumulated feature vectors, one row per recorded frame
  Image<float> getFeatures() const;

  /// Get the accumulated eye-position vectors, one row per recorded frame
  Image<float> getPositions() const;

  /// Get the factor by which the raw input dimensions are reduced
  uint inputReduction() const;

  /// Get the feature extractor type name associated with this training set
  const std::string& fxType() const;

private:
  // Helper used when rebalancing: accumulates feature/position vectors for
  // one group of samples into a fixed number of rows, filled round-robin
  struct PosGroup
  {
    PosGroup(int nrow, int nfeat, int npos)
      :
      features(nrow, Image<float>(nfeat, 1, ZEROS)),
      positions(nrow, Image<float>(npos, 1, ZEROS)),
      counts(nrow, int(0)),
      totalcount(0),
      next(0)
    {}

    // Accumulate one sample into the current row, then advance to the next
    // row, wrapping back to row 0 after the last one
    void add(const float* feat, const float* pos)
    {
      // element-wise accumulation of the feature vector into row 'next'
      for (int i = 0; i < features[next].getWidth(); ++i)
        features[next][i] += feat[i];

      // element-wise accumulation of the position vector into row 'next'
      for (int i = 0; i < positions[next].getWidth(); ++i)
        positions[next][i] += pos[i];

      ++(counts[next]); // samples absorbed by this row
      ++totalcount;     // samples absorbed by the whole group
      ++next;
      if (next == counts.size())
        next = 0;
    }

    std::vector<Image<float> > features;
    std::vector<Image<float> > positions;
    std::vector<uint> counts;
    uint totalcount;
    uint next;
  };
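
  // Worked example (illustrative, not from the original source): with
  // PosGroup(2, nfeat, npos), the first add() sums into row 0, the second
  // into row 1, the third into row 0 again, and so on; counts[] then holds
  // how many samples each row has absorbed, and totalcount the group total.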

  OModelParam<Dims>     itsRawInputDims;
  OModelParam<bool>     itsDoRebalance;
  OModelParam<uint>     itsRebalanceThresh;
  OModelParam<uint>     itsRebalanceGroupSize;
  std::string  const    itsFxType;
  unsigned int const    itsReduction;
  size_t                itsNumFeatures;
  bool                  itsLocked;
  std::vector<float>    itsFeatureVec;
  std::vector<float>    itsPositionVec;
  std::vector<PosGroup> itsPosGroups;
  int                   itsNumTraining;
  int                   itsNumLoaded;
  mutable Image<float>  itsFeatures;
  mutable Image<float>  itsPositions;

  OModelParam<int>      itsDecimationFactor;
};

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* mode: c++ */
/* indent-tabs-mode: nil */
/* End: */

#endif // TIGS_TRAININGSET_H_DEFINED