activeObjRec.C

/*! @file ObjRec/activeObjRec.C Test various object recognition algorithms */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Lior Elazary <elazary@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/ObjRec/activeObjRec.C $
// $Id: activeObjRec.C 10982 2009-03-05 05:11:22Z itti $
//

#include "Channels/SubmapAlgorithmBiased.H"
#include "Component/ModelManager.H"
#include "Component/OptionManager.H"
#include "Image/Image.H"
#include "Image/ShapeOps.H"
#include "Image/DrawOps.H"
#include "Neuro/StdBrain.H"
#include "Neuro/VisualCortexConfigurator.H"
#include "Devices/FrameGrabberConfigurator.H"
#include "Devices/IEEE1394grabber.H"
#include "Transport/FrameIstream.H"
#include "Neuro/VisualCortex.H"
#include "Neuro/NeuroOpts.H"
#include "Media/TestImages.H"
#include "Channels/DescriptorVec.H"
#include "Channels/ComplexChannel.H"
#include "Learn/Bayes.H"
#include "ObjRec/BayesianBiaser.H"

#include "GUI/XWinManaged.H"

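// Forward declarations for the helper routines defined below.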
int train(nub::soft_ref<IEEE1394grabber> &gb, DescriptorVec &descVec, Bayes &bayesNet);
int test(nub::soft_ref<IEEE1394grabber> &gb, DescriptorVec &descVec, Bayes &bayesNet);
int classifyImage(Image<PixRGB<byte> > & img, DescriptorVec &descVec, Bayes &bayesNet);
void learnImage(Image<PixRGB<byte> > & img, int cls, DescriptorVec &descVec, Bayes &bayesNet);

void biasImage( bool biasVal, Bayes &bayesNet );

Point2D<int> evolveBrain(Image<PixRGB<byte> > &img);

ModelManager *mgr;
#define NOBJ 2

XWinManaged disp(Dims(320, 240), -1, -1, "Test Output 1");
XWinManaged foveaDisp(Dims(256, 256), -1, -1, "Fovea Output");

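// Build the brain and frame grabber, configure the model options, then run
// the training and testing phases.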
int main(const int argc, const char **argv)
{
  MYLOGVERB = LOG_INFO;
  mgr = new ModelManager("Test ObjRec");

  //our brain
  nub::ref<StdBrain>  brain(new StdBrain(*mgr));
  mgr->addSubComponent(brain);

  //nub::soft_ref<FrameGrabberConfigurator>
  //  gbc(new FrameGrabberConfigurator(*mgr));
  nub::soft_ref<IEEE1394grabber>
    gb(new IEEE1394grabber(*mgr, "colorcam", "cocam"));

  mgr->addSubComponent(gb);

  gb->setModelParamVal("FrameGrabberSubChan", 0);
  gb->setModelParamVal("FrameGrabberBrightness", 128);
  gb->setModelParamVal("FrameGrabberHue", 180);

  mgr->exportOptions(MC_RECURSE);
  //mgr.setOptionValString(&OPT_RawVisualCortexChans, "IOC");
  //mgr.setOptionValString(&OPT_RawVisualCortexChans, "I");
  mgr->setOptionValString(&OPT_RawVisualCortexChans, "GNO");
  //mgr.setOptionValString(&OPT_RawVisualCortexChans, "N");
  //manager.setOptionValString(&OPT_UseOlderVersion, "false");
  mgr->setOptionValString(&OPT_SaliencyMapType, "Fast");
  mgr->setOptionValString(&OPT_WinnerTakeAllType, "Fast");
  mgr->setOptionValString(&OPT_SimulationTimeStep, "0.2");

  // set the FOA and fovea radii
  mgr->setModelParamVal("FOAradius", 50, MC_RECURSE);
  mgr->setModelParamVal("FoveaRadius", 50, MC_RECURSE);

  mgr->setOptionValString(&OPT_IORtype, "None");

  if (mgr->parseCommandLine(
        (const int)argc, (const char**)argv, "", 0, 0) == false)
    return(1);

  mgr->start();

  // get the frame grabber to start streaming:

  //Timer masterclock;                // master clock for simulations
  //Timer timer;

  ComplexChannel *cc =
    &*dynCastWeak<ComplexChannel>(brain->getVC());

  // Get a new descriptor vector
  DescriptorVec descVec(*mgr, "Descriptor Vector", "DescriptorVec", cc);

  // Get a new classifier
  Bayes bayesNet(descVec.getFVSize(), NOBJ);

  //////////////////////////////////////////////// Main test Loop //////////////////////////////////////

  LINFO("Training");
  train(gb, descVec, bayesNet);
  bayesNet.save("objRecCoil.net");
  //bayesNet.load("objRec.net");

  LINFO("Testing");
  // getchar();
  test(gb, descVec, bayesNet);

  return 0;
}

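// Grab frames from the camera and interactively train the Bayes network:
// each frame is attended by the brain and the user selects the object
// location with a mouse click in the display window.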
int train(nub::soft_ref<IEEE1394grabber> &gb, DescriptorVec &descVec, Bayes &bayesNet)
{
  // Use a fixed 50x50 fovea for training
  Dims trainSize(50, 50);
  descVec.setFoveaSize(trainSize);

  while(1)
  {
    Image< PixRGB<byte> > input = gb->readRGB();
    //input = rescale(input, trainSize); //resize the image

    LINFO("Obj learning...");
    learnImage(input, 1, descVec, bayesNet);
  }

  return 0;
}

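// Placeholder for the recognition (testing) loop; not implemented yet.
// Note that train() above never returns, so this is currently unreachable.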
int test(nub::soft_ref<IEEE1394grabber> &gb, DescriptorVec &descVec, Bayes &bayesNet)
{
  return 1;
}

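// Attend to the most salient location in the image, build a descriptor
// vector at that location and classify it with the Bayes network.
// Returns the class id, or -1 if the classifier cannot decide.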
int classifyImage(Image<PixRGB<byte> > & img, DescriptorVec &descVec, Bayes &bayesNet)
{
  Point2D<int> winner = evolveBrain(img); //evolve the brain

  //get the descriptor
  descVec.setFovea(winner);
  descVec.buildDV(); //build the descriptor vector

  //get the resulting feature vector
  std::vector<double> FV = descVec.getFV();

  // for(uint i=0; i<FV.size(); i++)
  //     LINFO("FV: %f", FV[i]);

  //classify
  int cls = bayesNet.classify(FV);

  if (cls == -1) //check for errors
    return -1;
  else
    return cls;
}

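// Attend to the image, display the attended location, and let the user
// click on the object to learn; the descriptor vector at the clicked
// location is fed to the Bayes network.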
void learnImage(Image<PixRGB<byte> > & img, int cls, DescriptorVec &descVec, Bayes &bayesNet)
{
  Point2D<int> winner = evolveBrain(img); //evolve the brain

  drawCircle(img, winner, 25, PixRGB<byte>(255, 255, 0));
  disp.drawImage(img);

  //get the descriptor at the location the user clicks on
  Point2D<int> loc = disp.getLastMouseClick();
  if (loc.isValid())
  {
    Dims WindowDims = disp.getDims();
    float newi = (float)loc.i * (float)img.getWidth()/(float)WindowDims.w();
    float newj = (float)loc.j * (float)img.getHeight()/(float)WindowDims.h();
    loc.i = (int)newi;
    loc.j = (int)newj;
    descVec.setFovea(loc);
    foveaDisp.drawImage(descVec.getFoveaImage());

    descVec.buildDV(); //build the descriptor vector

    //get the resulting feature vector
    std::vector<double> FV = descVec.getFV();

    double confi;
    int guess = bayesNet.classify(FV, &confi);
    //for(uint i=0; i<FV.size(); i++)
    //  LINFO("FV: %f", FV[i]);

    LINFO("cls %i confi %f", guess, confi);
    bayesNet.learn(FV, 0u);

    if (confi > -40)
      biasImage(true, bayesNet);
  }

  /*
  descVec.buildDV(); //build the descriptor vector

  //get the resulting feature vector
  std::vector<double> FV = descVec.getFV();

  //for(uint i=0; i<FV.size(); i++)
  //  LINFO("FV: %f", FV[i]);

  bayesNet.learn(FV, cls);*/
}

Point2D<int> evolveBrain(Image<PixRGB<byte> > &img)
{
  nub::ref<StdBrain> brain = dynCastWeak<StdBrain>(mgr->subComponent(0));

  if (mgr->started() && img.initialized()){ //give the image to the brain
    brain->time();

    brain->input(img);

    bool keep_going = true;
    while (keep_going){
      const SimStatus status = brain->evolve();
      if (status == SIM_BREAK) {
        LINFO("V %d\n", (int)(brain->time().msecs()) );
        keep_going = false;
      }
      if (brain->gotCovertShift()) // new attended location
      {
        const Point2D<int> winner = brain->getLastCovertPos();
        const float winV = brain->getLastCovertAgmV();

        LINFO("##### Winner (%d,%d) at %fms : %.4f #####",
              winner.i, winner.j, brain->time().msecs(), winV * 1000.0f);

        return winner;
      }
      if (brain->time().secs() > 3.0) {
        LINFO("##### Time limit reached #####");
        keep_going = false;
      }
      LINFO("Evolve brain");
    }
  }

  return Point2D<int>();
}

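// Bias the visual cortex submaps using the feature statistics stored in the
// Bayes network (object class 0), so that subsequent saliency computations
// favor the learned object.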
void biasImage( bool biasVal, Bayes &bayesNet )
{
  nub::ref<StdBrain> brain = dynCastWeak<StdBrain>(mgr->subComponent(0));
  ComplexChannel* cc = &*dynCastWeak<ComplexChannel>(brain->getVC());

  //Set mean and sigma to bias submap
  BayesianBiaser bb(bayesNet, 0, -1, biasVal);
  cc->accept(bb);

  //set the bias
  setSubmapAlgorithmBiased(*cc);
}