szvision.C

/*!@file VFAT/szvision.C  simplified version of vision.C with feature analysis
 */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: T. Nathan Mundhenk <mundhenk@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/VFAT/szvision.C $
// $Id: szvision.C 14376 2011-01-11 02:44:34Z pez $
//

// ############################################################
// ############################################################
// ##### --- VFAT ---
// ##### Vision Feature Analysis Tool:
// ##### T. Nathan Mundhenk nathan@mundhenk.com
// ##### Laurent Itti itti@pollux.usc.edu
// #####
// ############################################################
// ############################################################

#include "Component/ModelManager.H"
#include "VFAT/featureClusterVision.H"
#include "Media/MediaSimEvents.H"
#include "Neuro/NeuroSimEvents.H"
#include "Simulation/SimEventQueueConfigurator.H"
// The following headers are used directly below; in the original build they
// are likely pulled in transitively through featureClusterVision.H:
#include "Media/FrameSeries.H"
#include "Neuro/StdBrain.H"
#include "Raster/Raster.H"

int main(const int argc, const char **argv)
{
  MYLOGVERB = LOG_INFO;  // suppress debug messages

  // Instantiate a ModelManager:
  ModelManager manager("Attention Model");

  // Instantiate our various ModelComponents:
  nub::soft_ref<SimEventQueueConfigurator>
    seqc(new SimEventQueueConfigurator(manager));
  manager.addSubComponent(seqc);

  nub::soft_ref<InputFrameSeries> ifs(new InputFrameSeries(manager));
  manager.addSubComponent(ifs);

  nub::soft_ref<OutputFrameSeries> ofs(new OutputFrameSeries(manager));
  manager.addSubComponent(ofs);

  nub::soft_ref<StdBrain> brain(new StdBrain(manager));
  manager.addSubComponent(brain);

  // feature-analysis part of the model:
  const std::string name = "featureCluster";
  const std::string tag = "fCV";
  Image< PixRGB<byte> > input;

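  // covHolder collects per-cluster statistics (covariance data) from the
  // feature clustering; it is declared here but not used further in this
  // simplified stand-alone test.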
  std::vector<covHolder<double> > covHolder;
  //unsigned int covHolderSize;

  // Parse command-line:
  // at least one extra argument is required, since getExtraArg(0) is used below
  if (manager.parseCommandLine(argc, argv,
                               "[1--5 args]", 1, 5) == false)
    return(1);

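  // The first extra command-line argument is handed to featureClusterVision
  // at construction and reused below, both as the base of each per-frame
  // image name and as the argument to the stand-alone feature test.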
  nub::soft_ref<featureClusterVision<float> >
    fCV(new featureClusterVision<float>(manager,name,tag,brain,ifs,
                                        manager.getExtraArg(0)));
  manager.addSubComponent(fCV);
  nub::soft_ref<SimEventQueue> seq = seqc->getQ();

  // let's get all our ModelComponent instances started:
  manager.start();

  // main loop:
  while(1) {
    // write outputs or quit?
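    // A SimEventWTAwinner on the queue means the winner-take-all network
    // has produced a new covert attention shift; report this to the output
    // frame series so it knows a result is ready to be written out.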
    bool gotcovert = false;
    if (seq->check<SimEventWTAwinner>(0)) gotcovert = true;
    const FrameState os = ofs->update(seq->now(), gotcovert);

    if (os == FRAME_NEXT || os == FRAME_FINAL)
    {
      SimModuleSaveInfo sinfo(ofs, *seq);
      brain->save(sinfo);

      // Build a per-frame name of the form "<arg0>.<frame number>", with
      // the frame number zero-padded to six digits:
      int foo = ifs->frame();
      std::string Myname;
      std::string a = manager.getExtraArg(0);
      std::string b = ".";
      char c[100];
      if(foo == 1)
        ; //init = false;
      sprintf(c,"%06d",foo);
      Myname = a + b + c;
      // NOTE: a literal 0 is passed for the final '%d' because the original
      // call supplied no matching value
      LINFO("RUNNING FRAME %d NTARG %d",foo,0);
      // Upload the current frame to the classifier under its per-frame name
      //input = rescale(input,input.getWidth()/2,input.getHeight()/2);
      fCV->fCVuploadImage(input,Myname);
      // run featureClusterVision's stand-alone feature test on this frame
      fCV->fCVstandAloneFeatureTest(manager.getExtraArg(0));

      // classify and cluster this image
      /*fCV->fCVsaccadeTest(manager.getExtraArg(1),
                              manager.getExtraArg(2),
                              manager.getExtraArg(3),
                              manager.getExtraArg(4));*/
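      // The full classify-and-cluster path above is left disabled in this
      // simplified program; only the stand-alone feature test is exercised.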

      // get back image data
    }

    if (os == FRAME_FINAL)
      break;

    // Why handle the output before the input? Because the input and output
    // frame series switch to the next frame at exactly the same time when
    // the input and output frame rates are equal. When the input series
    // switches to a new frame, it resets any drawings obtained on the
    // previous frame, so we must save those results before reading the new
    // frame in.

    // if we displayed a bunch of images, let's pause:
    if (ifs->shouldWait() || ofs->shouldWait())
      Raster::waitForKey();

    // read a new image in?
    const FrameState is = ifs->update(seq->now());
    if (is == FRAME_COMPLETE) break; // done
    if (is == FRAME_NEXT || is == FRAME_FINAL) // new frame
    {
      input = ifs->readRGB();

      // an empty (uninitialized) image signifies end-of-stream
      if (input.initialized())
        {
          rutz::shared_ptr<SimEventInputFrame>
            e(new SimEventInputFrame(brain.get(), GenericFrame(input), 0));
          seq->post(e); // post the image to the brain

          // show memory usage if in debug mode:
          if (MYLOGVERB >= LOG_DEBUG)
            SHOWMEMORY("MEMORY USAGE: frame %d t=%.1fms", ifs->frame(),
                       seq->now().msecs());
        }
    }

    // evolve the brain and the rest of the simulation by one time step:
    const SimStatus status = seq->evolve();

    if (SIM_BREAK == status) // Brain decided it's time to quit
      break;
  }

  // stop all our ModelComponents:
  manager.stop();

  // all done!
  return 0;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */