/*!@file Neuro/SimulationViewerSurpCont.H entry interface between INVT and ASAC */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2003   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: T. Nathan Mundhenk <mundhenk@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/SimulationViewerSurpCont.H $
// $Id: SimulationViewerSurpCont.H 10714 2009-02-01 07:16:36Z itti $
//

#ifndef SIMULATIONVIEWERSURPCONT_H_DEFINED
#define SIMULATIONVIEWERSURPCONT_H_DEFINED

#include "Neuro/ScaleSurpriseControl.H"

#include "Component/ModelParam.H"
#include "Image/ArrayData.H"       // for Dims
#include "Image/Image.H"           // for Image
#include "Image/ImageSet.H"
#include "Image/Pixels.H"          // for PixRGB<byte>
#include "Image/Point2DT.H"        // for Point2DT
#include "Media/MediaSimEvents.H"
#include "Neuro/NeuroSimEvents.H"
#include "Neuro/SimulationViewer.H"
#include "Neuro/SpatialMetrics.H"
#include "Neuro/VisualCortex.H"
#include "Neuro/WTAwinner.H"       // for WTAwinner
#include "Util/SimTime.H"
#include "rutz/shared_ptr.h"       // for rutz::shared_ptr
#include <fstream>

class Brain;
class FrameOstream;

//! This class provides an interface into SurpriseControl (ASAC)
/*! Surprise Control takes in an image and a set of final stage conspicuity
    images at different scales and uses them to drive different filters
    that are designed to reduce/control surprise in an image.
    In general, any type of conspicuity map can be used, but it is primarily
    designed to work with surprise images. However, this version requires
    the standard default scales and channels (rg, by, etc.); it does not
    know how to handle other features such as junctions.

    You need to supply:
    - A FrameSeries of raw images, one at a time (as usual)
    - A config file; a default should be available in etc
    - A Brain, which is needed to extract the conspicuity maps

    This class will return:
    - A surprise-controlled image
    - A difference image showing what it changed
    - A beta image showing its internal temporally smoothed surprise maps

    A brief usage sketch is given at the end of this file.

    T. Nathan Mundhenk <mundhenk@usc.edu>
*/
class SimulationViewerSurpCont : public SimulationViewer {
public:
  // ######################################################################
  /*! @name Constructors and destructors */
  //@{

  //! Constructor. See ModelComponent.H.
  /*! @param mgr our ModelManager (see ModelManager.H)
      @param descrName descriptive name for human usage
      @param tagName name for ParamMap usage */
  SimulationViewerSurpCont(OptionManager& mgr,
                           const std::string& descrName = "ASAC Surprise Control",
                           const std::string& tagName = "SurpriseControl");

  //! Destructor
  virtual ~SimulationViewerSurpCont();

  //! Set our brain
  virtual void setBrain(Brain* brain);

  //@}

  //! Initialize everything; takes the raw input image size
  void init(const ushort baseSizeX, const ushort baseSizeY);

protected:
  //! Callback for when a new input frame is available
  SIMCALLBACK_DECLARE(SimulationViewerSurpCont, SimEventInputFrame);

  //! Callback for every clock tick
  SIMCALLBACK_DECLARE(SimulationViewerSurpCont, SimEventClockTick);

  //! Callback for every time we should save our outputs
  SIMCALLBACK_DECLARE(SimulationViewerSurpCont, SimEventSaveOutput);

  //! Save our various results
  virtual void save1(const ModelComponentSaveInfo& sinfo);

  //! Save results to the given FrameOstream
  void saveResults(const nub::ref<FrameOstream>& ofs);

  //! Return the surprise-controlled image
  Image<PixRGB<float> > getResult();

  //! Return the difference images if needed
  std::vector<Image<PixRGB<float> > > getDiffImages();

  //! Return the beta images if needed
  std::vector<Image<float> > getBetaImages();

protected:

  //! Draw current time onto given image
  void drawTime(Image< PixRGB<byte> >& image);

  //! Metrics that depend on the input size
  nub::ref<SpatialMetrics> itsMetrics;

  //! Should we output the difference image between the pre/post images?
  OModelParam<bool> itsDrawDiffParts;
  //! Should we output the surprise maps used here?
  OModelParam<bool> itsDrawBetaParts;
  //! Should we output the bias maps used here?
  OModelParam<bool> itsDrawBiasParts;
  //! Should we output the separable filter layers used here?
  OModelParam<bool> itsDrawSeperableParts;
  //! Config file to open
  OModelParam<std::string> itsConfigFile;
  //! LevelSpec used by our channels, used to compute output dims
  OModelParam<LevelSpec> itsLevelSpec;

private:
  SimTime itsCurrTime;          // current time
  ScaleSurpriseControl<float> itsScaleSurpriseControl;
  //! local copy of the input image
  Image<PixRGB<float> > itsInput;
  //! input changed since last getTraj()
  bool itsHasNewInput;
  //! tells us if we need to initialize ScaleSurpriseControl
  bool itsInit;
};

#endif

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */
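
// ######################################################################
// Usage sketch (illustrative only): SimulationViewerSurpCont is a regular
// ModelComponent, so it is normally created and configured through the
// ModelManager machinery and then driven by the SimEventInputFrame,
// SimEventClockTick and SimEventSaveOutput callbacks declared above.
// The companion components named below (SimEventQueueConfigurator,
// InputFrameSeries, StdBrain) are assumptions based on typical INVT
// front-end programs and are not mandated by this header; likewise, the
// command-line options exposed by the OModelParam members (config file,
// diff/beta/bias/separable outputs) are defined elsewhere in the toolkit.
//
//   ModelManager manager("ASAC Surprise Control demo");
//
//   // event queue that will dispatch the SIMCALLBACK_DECLARE callbacks
//   nub::ref<SimEventQueueConfigurator>
//     seqc(new SimEventQueueConfigurator(manager));
//   manager.addSubComponent(seqc);
//
//   // source of raw input frames (posts SimEventInputFrame)
//   nub::ref<InputFrameSeries> ifs(new InputFrameSeries(manager));
//   manager.addSubComponent(ifs);
//
//   // a brain is needed to compute the conspicuity maps ASAC consumes
//   nub::ref<StdBrain> brain(new StdBrain(manager));
//   manager.addSubComponent(brain);
//
//   // the surprise-control viewer itself
//   nub::ref<SimulationViewerSurpCont>
//     sv(new SimulationViewerSurpCont(manager));
//   manager.addSubComponent(sv);
//
//   if (manager.parseCommandLine(argc, argv, "<options>", 0, 0) == false)
//     return 1;
//
//   manager.start();
//   // ... evolve the SimEventQueue so that input frames reach the
//   //     callbacks above and results are saved on SimEventSaveOutput ...
//   manager.stop();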