/*!@file SceneUnderstanding/SceneUnderstanding.H */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE. See the GNU General Public License for more details.        //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Lior Elazary <elazary@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/plugins/SceneUnderstanding/SceneUnderstanding.H $
// $Id: SceneUnderstanding.H 13413 2010-05-15 21:00:11Z itti $
//

#ifndef SCENEUNDERSTANDING_SCENEUNDERSTANDING_H_DEFINED
#define SCENEUNDERSTANDING_SCENEUNDERSTANDING_H_DEFINED

#include "Util/Types.H" // for uint
#include "Image/Point2D.H"
#include "Component/ModelComponent.H"
#include "Component/ModelParam.H"
#include "Component/ModelManager.H"
#include "Learn/Bayes.H"
#include "Learn/SWIProlog.H"
#include "Channels/DescriptorVec.H"
#include "Channels/SingleChannel.H"
#include "Channels/ComplexChannel.H"
#include "Neuro/StdBrain.H"
#include "Neuro/VisualCortex.H"
#include "Neuro/SimulationViewer.H"
#include "plugins/SceneUnderstanding/WorkingMemory.H"
#include "Simulation/SimEventQueue.H"
#include "Simulation/SimEventQueueConfigurator.H"
#include <vector>
#include <string>

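//! Scene understanding built on a StdBrain attention model, a Bayes network, and an SWI-Prolog engine
/*! A minimal usage sketch, not taken from the original sources: it assumes
    the caller has already configured and started a ModelManager ("mgr") and
    a StdBrain ("brain"), and has an input frame available as an
    Image<PixRGB<byte> > ("frame"). Only methods declared below are used,
    and the "mug" label is purely hypothetical.

    \code
    SceneUnderstanding su(&mgr, brain);          // mgr/brain provided by the caller
    su.setImage(frame);                          // set the current retinal input
    float interest = su.evolveBrain();           // evolve the brain; returns the interest level
    Point2D<int> fovea = su.getFoveaLoc();       // where attention landed
    double prob = 0.0;
    int objId = su.classifyFovea(fovea, &prob);  // classify the object under the fovea
    su.learn(fovea, "mug");                      // or associate the foveal features with a label
    su.biasFor("mug");                           // bias the feature maps toward a known object
    std::string scene = su.highOrderRec();       // high-order recognition via the Prolog KB
    \endcode
*/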
class SceneUnderstanding
{
public:

  SceneUnderstanding(ModelManager *mgr, nub::ref<StdBrain> &brain);

  //! Destructor
  ~SceneUnderstanding();

  //! Get the brain
  nub::ref<StdBrain> getBrainPtr();

  //! Get the Bayes network
  Bayes* getBayesPtr();

  //! Get the Prolog engine
  SWIProlog* getPrologPtr();

  //! Get the descriptor vector
  DescriptorVec* getDescriptorVecPtr();

  //! Set the current image
  void setImage(Image<PixRGB<byte> > &img);

  //! Evolve the brain
  //! Return the interest level
  float evolveBrain();

  //! Get the current fovea location (attention location)
  Point2D<int> getFoveaLoc();

  //! Get a feature vector from the fovea at location p
  std::vector<double> getFV(Point2D<int> foveaLoc);

  //! Classify the object under the fovea positioned at p
  int classifyFovea(Point2D<int> foveaLoc, double *prob=NULL);

  //! Learn to associate the features under the fovea with an object
  void learn(Point2D<int> foveaLoc, const char *name);

  //! Try to bias the feature maps to locate the object
  //! Return true if we know about the object
  bool biasFor(const char *name);

  //! Recognize using high-order information; in particular, use the Prolog KB
  std::string highOrderRec();

private:

  ModelManager *itsMgr;
  nub::ref<StdBrain> itsBrain;
  nub::soft_ref<SimEventQueue> itsSEQ;   // gets set from the manager
  Bayes *itsBayes;
  SWIProlog *itsProlog;
  DescriptorVec* itsDescriptorVec;
  Image<PixRGB<byte> > itsImg;           // our current image on the retina
  Point2D<int> itsCurrentFoveaLoc;       // the current fovea location
  WorkingMemory *itsWorkingMemory;       // our working memory

};

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */

#endif // SCENEUNDERSTANDING_SCENEUNDERSTANDING_H_DEFINED