app-egomotion.C

00001 /*! @file Beobot/app-egomotion.C application to demonstrate egomotion from
00002   visual data - input is an MPEG movie or a list (in gistList format) of
00003   image files (assumed to be .ppm)
00004   Note on reading the velocity:
00005         right hand rule, x-positive to the left, y-positive to the north,
00006         z-positive forward                                              */
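/* Example invocation (illustrative path only; the program expects exactly one
   extra argument, either an .mpg movie or a file ending in "_gistList.txt"):
     app-egomotion data/hallway_gistList.txt                                 */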
00007 // //////////////////////////////////////////////////////////////////// //
00008 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00009 // by the University of Southern California (USC) and the iLab at USC.  //
00010 // See http://iLab.usc.edu for information about this project.          //
00011 // //////////////////////////////////////////////////////////////////// //
00012 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00013 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00014 // in Visual Environments, and Applications'' by Christof Koch and      //
00015 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00016 // pending; application number 09/912,225 filed July 23, 2001; see      //
00017 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00018 // //////////////////////////////////////////////////////////////////// //
00019 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00020 //                                                                      //
00021 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00022 // redistribute it and/or modify it under the terms of the GNU General  //
00023 // Public License as published by the Free Software Foundation; either  //
00024 // version 2 of the License, or (at your option) any later version.     //
00025 //                                                                      //
00026 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00027 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00028 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00029 // PURPOSE.  See the GNU General Public License for more details.       //
00030 //                                                                      //
00031 // You should have received a copy of the GNU General Public License    //
00032 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00033 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00034 // Boston, MA 02111-1307 USA.                                           //
00035 // //////////////////////////////////////////////////////////////////// //
00036 //
00037 // Primary maintainer for this file: Christian Siagian <siagian@usc.edu>
00038 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Beobot/app-egomotion.C $
00039 // $Id: app-egomotion.C 14535 2011-02-18 22:40:51Z siagian $
00040 //
00041 
00042 #include "Beobot/Landmark.H"
00043 #include "Channels/ChannelOpts.H"
00044 #include "Component/GlobalOpts.H"
00045 #include "Component/ModelManager.H"
00046 #include "Component/ModelOptionDef.H"
00047 #include "Component/OptionManager.H"
00048 #include "GUI/XWinManaged.H"
00049 #include "Gist/FFN.H"
00050 #include "Gist/trainUtils.H"
00051 #include "Image/ColorOps.H"
00052 #include "Image/CutPaste.H"
00053 #include "Image/DrawOps.H"
00054 #include "Image/MathOps.H"
00055 #include "Image/Pixels.H"
00056 #include "Image/ShapeOps.H"
00057 #include "Image/Transforms.H"
00058 #include "Media/MPEGStream.H"
00059 #include "Media/MediaOpts.H"
00060 #include "Media/MediaSimEvents.H"
00061 #include "Neuro/GistEstimator.H"
00062 #include "Neuro/InferoTemporal.H"
00063 #include "Neuro/NeuroOpts.H"
00064 #include "Neuro/NeuroSimEvents.H"
00065 #include "Neuro/Retina.H"
00066 #include "Neuro/ShapeEstimator.H"
00067 #include "Neuro/ShapeEstimatorModes.H"
00068 #include "Neuro/SpatialMetrics.H"
00069 #include "Neuro/StdBrain.H"
00070 #include "Neuro/gistParams.H"
00071 #include "Neuro/VisualCortex.H"
00072 #include "Raster/Raster.H"
00073 #include "SIFT/CameraIntrinsicParam.H"
00074 #include "SIFT/Histogram.H"
00075 #include "SIFT/Keypoint.H"
00076 #include "SIFT/VisualObject.H"
00077 #include "SIFT/VisualObjectDB.H"
00078 #include "Simulation/SimEventQueueConfigurator.H"
00079 #include "Util/Timer.H"
00080 
00081 #include <iostream>
00082 #include <fstream>
00083 
00084 
00085 #include "SIFT/SIFTegomotion.H"
00086 
00087 #define DB_NAME "out_database"
00088 
00089 #define W_ASPECT_RATIO  320 // ideal minimum width for display
00090 #define H_ASPECT_RATIO  240 // ideal minimum height for display
00091 
00092 FeedForwardNetwork *ffn_place;
00093 double **gistW   = NULL;
00094 
00095 CloseButtonListener wList;
00096 rutz::shared_ptr<XWinManaged> salWin;
00097 rutz::shared_ptr<XWinManaged> objWin;
00098 rutz::shared_ptr<XWinManaged> trajWin;
00099 
00100 int wDisp, hDisp, sDisp, scaleDisp;
00101 int wDispWin,  hDispWin;
00102 
00103 // gist display
00104 int pcaW = 16, pcaH = 5;
00105 int winBarW = 5, winBarH = 25;
00106 
00107 // number of landmarks produced
00108 int numObj = 0;
00109 
00110 // ######################################################################
00111 void                  setupDispWin     (int w, int h);
00112 Image< PixRGB<byte> > getSalDispImg    (Image< PixRGB<byte> > img, Image<float> roiImg,
00113                                         Image< PixRGB<byte> > objImg, Image< PixRGB<byte> > objImg2);
00114 void                  processSalCue    (Image<PixRGB<byte> > inputImg,
00115                                         nub::soft_ref<StdBrain> brain, Point2D<int> winner, int fNum,
00116                                         std::vector< rutz::shared_ptr<Landmark> >& landmarks,
00117                                         const Image<float>& semask, const std::string& selabel);
00118 void                  getGistFileList  (std::string fName,  std::vector<std::string>& tag,
00119                                         std::vector<int>& start, std::vector<int>& num);
00120 Image< PixRGB<byte> > getTrajImg       (std::vector<Image <double> > traj, int w, int h);
00121 
00122 // ######################################################################
00123 
00124 // Main function
00125 /*! Read frames from an MPEG movie or a gistList of image files, track each
00126   view against a whole-scene Landmark, and estimate camera egomotion. */
00127 int main(const int argc, const char **argv)
00128 {
00129   MYLOGVERB = LOG_INFO;  // suppress debug messages
00130 
00131   // Instantiate a ModelManager:
00132   ModelManager manager("Egomotion Model");
00133 
00134   // we cannot use saveResults() on our various ModelComponent objects
00135   // here, so let's not export the related command-line options.
00136   manager.allowOptions(OPTEXP_ALL & (~OPTEXP_SAVE));
00137 
00138   // Instantiate our various ModelComponents:
00139   nub::soft_ref<SimEventQueueConfigurator>
00140     seqc(new SimEventQueueConfigurator(manager));
00141   manager.addSubComponent(seqc);
00142 
00143   nub::soft_ref<InputMPEGStream>
00144     ims(new InputMPEGStream(manager, "Input MPEG Stream", "InputMPEGStream"));
00145   manager.addSubComponent(ims);
00146 
00147   nub::soft_ref<StdBrain> brain(new StdBrain(manager));
00148   manager.addSubComponent(brain);
00149 
00150   nub::ref<SpatialMetrics> metrics(new SpatialMetrics(manager));
00151   manager.addSubComponent(metrics);
00152 
00153   manager.exportOptions(true);
00154   metrics->setFOAradius(30); // FIXME
00155   metrics->setFoveaRadius(30); // FIXME
00156   manager.setOptionValString(&OPT_MaxNormType, "FancyOne");
00157   manager.setOptionValString(&OPT_UseRandom, "false");
00158 
00159   manager.setOptionValString(&OPT_IORtype, "Disc");
00160   manager.setOptionValString(&OPT_RawVisualCortexChans,"OIC");
00161 
00162   // customize the region considered part of the "object"
00163   //  manager.setOptionValString("ShapeEstimatorMode","SaliencyMap");
00164   //  manager.setOptionValString(&OPT_ShapeEstimatorMode,"ConspicuityMap");
00165   manager.setOptionValString(&OPT_ShapeEstimatorMode, "FeatureMap");
00166   manager.setOptionValString(&OPT_ShapeEstimatorSmoothMethod, "Chamfer");
00167   //manager.setOptionValString(&OPT_ShapeEstimatorSmoothMethod, "Gaussian");
00168 
00169   // DO NOT set up the INFEROTEMPORAL
00170   //manager.setOptionValString(&OPT_InferoTemporalType,"Std");
00171   //manager.setOptionValString(&OPT_AttentionObjRecog,"yes");
00172   //manager.setOptionValString(&OPT_MatchObjects,"false");
00173 
00174   // Request a bunch of option aliases (shortcuts to lists of options):
00175   REQUEST_OPTIONALIAS_NEURO(manager);
00176 
00177   // Parse command-line:
00178   if (manager.parseCommandLine(argc, argv, "<*.mpg or *_gistList.txt>",
00179                                1, 1) == false)
00180     return(1);
00181 
00182   nub::soft_ref<SimEventQueue> seq = seqc->getQ();
00183 
00184   // if the file passed ends with _gistList.txt
00185   // we have a different protocol
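  // (the find() below starts 13 characters before the end of the string,
  //  so it effectively tests for the 13-character suffix "_gistList.txt")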
00186   bool isGistListInput = false;
00187   int ifLen = manager.getExtraArg(0).length();
00188   if(ifLen > 13 &&
00189      manager.getExtraArg(0).find("_gistList.txt",ifLen - 13) != std::string::npos)
00190     isGistListInput = true;
00191 
00192   // NOTE: this could now be controlled by a command-line option
00193   // --preload-mpeg=true
00194   manager.setOptionValString(&OPT_InputMPEGStreamPreload, "true");
00195 
00196   // do post-command-line configs:
00197   std::vector<std::string> tag;
00198   std::vector<int> start;
00199   std::vector<int> num;
00200   if(isGistListInput)
00201     {
00202       LINFO("we have a gistList input");
00203       getGistFileList(manager.getExtraArg(0).c_str(), tag, start, num);
00204     }
00205   else
00206     {
00207       LINFO("we have an mpeg input");
00208       ims->setFileName(manager.getExtraArg(0));
00209       manager.setOptionValString(&OPT_InputFrameDims,
00210                                  convertToString(ims->peekDims()));
00211     }
00212 
00213   // frame delay in seconds
00214   double rtdelay = 33.3667/1000.0;    // real time
00215   double fdelay  = rtdelay * 3;           // 3 times slower than real time
00216   (void)fdelay;
00217 
00218   // let's get all our ModelComponent instances started:
00219   manager.start();
00220 
00221   // create a landmark covering the whole scene
00222   rutz::shared_ptr<Landmark> scene(new Landmark());
00223   scene->setMatchWin(objWin);
00224 
00225   // we HARD CODE the camera intrinsic parameter FOR BEOBOT
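  // (by argument order these values presumably are the focal lengths fx, fy
  //  in pixels, the principal point cx, cy, and a final 0.0 skew/distortion
  //  term -- an assumption noted here for readability, not verified)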
00226   rutz::shared_ptr<CameraIntrinsicParam>
00227     cip(new CameraIntrinsicParam(435.806712867904707, 438.523234664943345,
00228                                  153.585228257964644,  83.663180940275609, 0.0));
00229   scene->setCameraIntrinsicParam(cip);
00230 
00231   // main loop:
00232   SimTime prevstime = SimTime::ZERO(); uint fNum = 0;
00233   Image< PixRGB<byte> > inputImg;
00234   Image< PixRGB<byte> > dispImg;
00235   int w = 0;  // 320 or iDims.w() - 50 + 1;
00236   int h = 0;  // 240 or iDims.h();
00237   unsigned int cLine = 0;
00238   int cIndex = (isGistListInput ? start[0] : 0); // start[] is empty for mpeg input
00239   std::string folder =  "";
00240   std::string::size_type sPos = manager.getExtraArg(0).rfind("/",ifLen);
00241   if(sPos != std::string::npos)
00242     folder = manager.getExtraArg(0).substr(0,sPos+1);
00243   std::vector<Image<double> > traj;
00244   while(1)
00245   {
00246      // has the time come for a new frame?
00247      // If we want to SLOW THINGS DOWN change fdelay
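     // the condition below extrapolates the simulation clock half a step
     // ahead, now + 0.5*(now - prevstime), and grabs a new frame once that
     // time passes the next frame boundary at (fNum+1)*fdelay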
00248      if (fNum == 0 ||
00249         (seq->now() - 0.5 * (prevstime - seq->now())).secs() - fNum * fdelay > fdelay)
00250        {
00251          // load new frame
00252          std::string fName;
00253          if(isGistListInput)
00254            {
00255              if (cLine >= tag.size()) break;  // end of input list
00256 
00257              // open the current file
00258              char tNumStr[100]; sprintf(tNumStr,"%06d",cIndex);
00259              fName = folder + tag[cLine] + std::string(tNumStr) + ".ppm";
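             // e.g. folder "data/", tag "HNB_" and cIndex 12 would yield
             // "data/HNB_000012.ppm" (illustrative values only)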
00260 
00261              inputImg = Raster::ReadRGB(fName);
00262              cIndex++;
00263 
00264              if(cIndex >= start[cLine] + num[cLine])
00265                {
00266                  cLine++;
00267                  if (cLine < tag.size()) cIndex = start[cLine];
00268                }
00269 
00270              // reformat the file name to a gist name
00271              // (size_type avoids truncating npos if no '_' is found)
00272              std::string::size_type uPos = fName.rfind("_");
00273              fName = fName.substr(0, uPos) + ".ppm";
00274            }
00275          else
00276            {
00277              fName = manager.getExtraArg(0);
00278              inputImg = ims->readRGB();
00279              if (inputImg.initialized() == false) break;  // end of input stream
00280              // format new frame
00281              inputImg = crop(inputImg,
00282                              Rectangle::tlbrI(0,25,inputImg.getHeight()-1, inputImg.getWidth()-25));
00283              cIndex = fNum+1;
00284            }
00285 
00286          // set up the display windows at the start of the stream
00287          // (salWin, objWin and trajWin are created in setupDispWin)
00288          if (fNum == 0)
00289            {
00290              w = inputImg.getWidth(); h = inputImg.getHeight();
00291              setupDispWin(w, h); LINFO("w: %d, h: %d",w, h);
00292            }
00293 
00294          dispImg = inputImg;
00295          salWin->drawImage(dispImg,0,0);
00296          LINFO("\nnew frame: %d", fNum);
00297 
00298          // take out frame borders NOTE: ONLY FOR SONY CAMCORDER
00299          //inputImg = crop(inputImg, Rectangle::tlbrI(0, 25, h-1, 25 + w - 1));
00300 
00301          // pass input to brain:
00302          rutz::shared_ptr<SimEventInputFrame>
00303            e(new SimEventInputFrame(brain.get(), GenericFrame(inputImg), 0));
00304          seq->post(e); //post the image to the brain
00305 
00306          // track the view
00307          std::string viewName(sformat("view%07d", fNum));
00308          rutz::shared_ptr<VisualObject>
00309            cv(new VisualObject(viewName, "", inputImg));
00310          rutz::shared_ptr<VisualObjectMatch> cmatch = scene->build(cv, fNum);
00311 
00312          // CHECK EGOMOTION
00313          if(fNum != 0)
00314            {
00315              rutz::shared_ptr<SIFTegomotion>
00316                egm(new SIFTegomotion(cmatch, cip, objWin));
00317 
00318              // reading the velocity:
00319              // right hand rule, x-positive to the left,
00320              // y-positive to the north, z-positive forward
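             // (each velocity appears to be a one-column Image<double>
             //  whose rows 0..2 hold the x, y, z components, as inferred
             //  from the getVal(0,row) accesses in getTrajImg() below)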
00321              traj.push_back(egm->getItsVel());
00322              egm->print(traj[traj.size() -1] ,"final velocity");
00323              trajWin->drawImage(getTrajImg(traj, 5*w, 2*h),0,0);
00324              //Raster::waitForKey();
00325            }
00326 
00327          // increment frame count
00328          fNum++;
00329        }
00330 
00331     // evolve brain:
00332     prevstime = seq->now(); // time before current step
00333     const SimStatus status = seq->evolve();
00334 
00335     // process SALIENT location found
00336     if (SeC<SimEventWTAwinner> e = seq->check<SimEventWTAwinner>(0))
00337       {
00338         const Point2D<int> winner = e->winner().p;
00339         LINFO("Frame: %d, winner: (%d,%d)", fNum, winner.i, winner.j);
00340 
00341         Image<float> semask; std::string selabel;
00342         if (SeC<SimEventShapeEstimatorOutput>
00343             e = seq->check<SimEventShapeEstimatorOutput>(0))
00344           { semask = e->smoothMask(); selabel = e->winningLabel(); }
00345 
00346         //processSalCue(inputImg, brain, winner, fNum, landmarks,
00347         // semask, selabel);
00348 
00349         if (SIM_BREAK == status) // Brain decided it's time to quit
00350           break;
00351       }
00352   }
00353 
00354   // save the resulting database:
00355   //if(vdb->numObjects() != 0)
00356   //  vdb->saveTo(DB_NAME);
00357 
00358   // stop all our ModelComponents
00359   manager.stop();
00360 
00361   // all done!
00362   return 0;
00363 }
00364 
00365 // ######################################################################
00366 // process salient cues
00367 void processSalCue(const Image<PixRGB<byte> > inputImg,
00368                    nub::soft_ref<StdBrain> brain, Point2D<int> winner, int fNum,
00369                    std::vector< rutz::shared_ptr<Landmark> >& landmarks,
00370                    const Image<float>& semask, const std::string& selabel)
00371 {
00372   // segment out the object -> maybe port to infero-temporal later
00373   // ----------------------------------------------
00374 
00375   // use Shape estimator to focus on the attended region
00376   Image<float> roiImg;
00377   Image<PixRGB<byte> > objImgSE;
00378   Point2D<int> objOffsetSE;
00379   if (semask.initialized())
00380     {
00381       roiImg = semask * luminance(inputImg);
00382 
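      // crop to the bounding box of mask pixels above 5% of its maximum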
00383       float mn, mx; getMinMax(semask,mn,mx);
00384       Rectangle r = findBoundingRect(semask, mx*.05f);
00385       objImgSE = crop(inputImg, r); // assign the outer variable (was shadowed by a local)
00386       objOffsetSE = Point2D<int>(r.left(),r.top());
00387     }
00388   else
00389     {
00390       LINFO("SE Smooth Mask not yet initialized");
00391       roiImg = luminance(inputImg);
00392       objImgSE = inputImg;
00393       objOffsetSE = Point2D<int>(0,0);
00394     }
00395 
00396   // ----------------------------------------------
00397   // or with pre-set window
00398   Rectangle roi =
00399     Rectangle::tlbrI(winner.j - 50, winner.i - 50,
00400                     winner.j + 50, winner.i + 50);
00401   roi = roi.getOverlap(inputImg.getBounds());
00402   LINFO("[%d,%d,%d,%d]",roi.top(),roi.left(),roi.bottomI(),roi.rightI());
00403   Image<PixRGB<byte> > objImgWIN =  crop(inputImg, roi);
00404   Point2D<int> objOffsetWIN(roi.left(),roi.top());
00405 
00406   // ----------------------------------------------
00407 
00408   LINFO("TOP LEFT at: SE:(%d,%d) WIN:(%d,%d)",
00409         objOffsetSE.i  , objOffsetSE.j,
00410         objOffsetWIN.i , objOffsetWIN.j);
00411 
00412   // draw the results
00413   drawCircle(roiImg, winner, 10, 0.0f, 1);
00414   drawPoint(roiImg, winner.i, winner.j, 0.0f);
00415   LINFO("Frame: %d, winner: (%d,%d) in %s", fNum, winner.i, winner.j,
00416         selabel.c_str());
00417   salWin->drawImage(getSalDispImg(inputImg,roiImg,objImgWIN,objImgSE),0,0);
00418   //salWin->drawImage(Image<PixRGB<byte> >(inputImg.getDims() * 2, ZEROS) ,0,0);
00419   Raster::waitForKey();
00420 
00421   // WE CHOOSE: SE
00422   Image<PixRGB<byte> > objImg(objImgSE);
00423   Point2D<int> objOffset(objOffsetSE);
00424 
00425   // need a Visual Cortex to obtain the feature vector
00426   LFATAL("fixme using a SimReq");
00427   //////////nub::soft_ref<VisualCortex> vc = brain->getVC();
00428   std::vector<float> fvec; ////////vc->getFeatures(winner, fvec);
00429 
00430   // create a new VisualObject (a set of SIFT keypoints)
00431   // with the top-left coordinate of the window
00432   rutz::shared_ptr<VisualObject>
00433     obj(new VisualObject("NewObject", "NewObject",
00434                          objImg, winner - objOffset, fvec));
00435 
00436   std::string objName(sformat("obj%07d", numObj));
00437   obj->setName(objName);
00438   obj->setImageFname(objName + ".png");
00439   numObj++;
00440 
00441   // check with the salient regions DB before adding
00442   int trackAccepted = 0;
00443   LINFO("we have: %"ZU" landmarks to match", landmarks.size());
00444   for(uint i = 0; i < landmarks.size(); i++)
00445     {
00446        LINFO("tracking landmark number: %d",i);
00447        rutz::shared_ptr<VisualObjectMatch> cmatch =
00448          landmarks[i]->build(obj, objOffset, fNum);
00449        if(cmatch.is_valid() && cmatch->getScore() > 3.0)
00450          trackAccepted++;
00451     }
00452 
00453   // if it's not used by any of the existing landmarks entry
00454   if(trackAccepted == 0)
00455     {
00456       // create a new one
00457       LINFO("create a new Landmark number %"ZU,landmarks.size());
00458       std::string lmName(sformat("landmark%07"ZU, landmarks.size()));
00459       rutz::shared_ptr<Landmark>
00460         newlm(new Landmark(obj, objOffset, fNum, lmName));
00461       newlm->setMatchWin(objWin);
00462       landmarks.push_back(newlm);
00463       Raster::waitForKey();
00464     }
00465   else if(trackAccepted > 1)
00466     {
00467        LINFO("May have: %d objects jumbled together", trackAccepted);
00468     }
00469 }
00470 
00471 // ######################################################################
00472 void getGistFileList(std::string fName,  std::vector<std::string>& tag,
00473                      std::vector<int>& start, std::vector<int>& num)
00474 {
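  // gistList format, as parsed below: line 1 = number of samples, line 2 =
  // number of categories, line 3 = ground-truth type, line 4 = column
  // headers; then one line per segment:
  //   <tag> <start index> <frame count> <ground truth> <extension>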
00475   char comment[200]; FILE *fp;  char inLine[1000]; // sized to match the fgets() calls below
00476 
00477   // open the file
00478   if((fp = fopen(fName.c_str(),"rb")) == NULL)
00479     LFATAL("samples file: %s not found",fName.c_str());
00480   LINFO("fName: %s",fName.c_str());
00481 
00482   // get number of samples
00483   int nSamples; if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%d %s", &nSamples, comment);
00484 
00485   // the number of categories
00486   int tNcat; if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%d %s", &tNcat, comment);
00487 
00488   // get the type of ground truth
00489   char gtOpt[100]; if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%s %s", gtOpt, comment);
00490 
00491   // skip column headers
00492   if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed");
00493 
00494   char cName[100]; char ext[100];  int cStart, cNum; int gTruth;
00495   while(fgets(inLine, 1000, fp) != NULL)
00496   {
00497     // get the files in this category and ground truth
00498     sscanf(inLine, "%s %d %d %d %s", cName, &cStart, &cNum,  &gTruth, ext);
00499     LINFO("    sName: %s %d %d %d %s",cName, cStart, cNum, gTruth, ext);
00500 
00501     tag.push_back(cName);
00502     start.push_back(cStart);
00503     num.push_back(cNum);
00504   }
00505   fclose(fp);
00506 }
00507 
00508 // ######################################################################
00509 // setup display window for visualization purposes
00510 void setupDispWin(int w, int h)
00511 {
00512   salWin.reset(new XWinManaged(Dims(2*w, 2*h), 2*w, 0, "Saliency Related" ));
00513   wList.add(*salWin);
00514 
00515   objWin.reset(new XWinManaged(Dims(2*w, 2*h), 0, 0, "Object Match" ));
00516   wList.add(*objWin);
00517 
00518   trajWin.reset(new XWinManaged(Dims(5*w, 2*h), 0, 0, "Trajectory" ));
00519   wList.add(*trajWin);
00520 }
00521 
00522 // ######################################################################
00523 // get saliency display image for visualization purposes
00524 Image< PixRGB<byte> > getSalDispImg   (Image< PixRGB<byte> > img,
00525                                        Image<float> roiImg,
00526                                        Image< PixRGB<byte> > objImg,
00527                                        Image< PixRGB<byte> > objImg2)
00528 {
00529   int w = img.getWidth(), h = img.getHeight();
00530   Image< PixRGB<byte> > salDispImg(2*w,2*h,ZEROS);
00531 
00532   inplacePaste(salDispImg, img,        Point2D<int>(0, 0));
00533   Image< PixRGB<byte> > t = makeRGB(roiImg,roiImg,roiImg);
00534   inplacePaste(salDispImg, t,          Point2D<int>(0, h));
00535   inplacePaste(salDispImg, objImg,     Point2D<int>(w, 0));
00536   inplacePaste(salDispImg, objImg2,     Point2D<int>(w, h));
00537 
00538   return salDispImg;
00539 }
00540 
00541 // ######################################################################
00542 // get trajectory image for visualization purposes
00543 Image< PixRGB<byte> > getTrajImg (std::vector<Image <double> > traj, int w, int h)
00544 {
00545   Image< PixRGB<byte> > trajImg(w, h, ZEROS);
00546 
00547   int sX = 10; int sY = h/2;
00548 
00549   // velocity is relative in the range of -1.0 to 1.0
00550   double scale = 5.0;
00551   double locX = double(sX);
00552   double locY = double(sY);
00553 
00554   // draw each trajectory in the history
00555   for(uint i = 0;  i < traj.size(); i++)
00556     {
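      // map the robot frame to screen coordinates: forward motion (z, row 2)
      // moves the plot to the right; leftward motion (x, row 0) moves it up,
      // hence the sign flip on dY (screen y grows downward)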
00557       double dX =  traj[i].getVal(0,2)*scale;
00558       double dY = -traj[i].getVal(0,0)*scale;
00559       LINFO("%d. %f,%f -> dx: %f, dy: %f ", i, traj[i].getVal(0,2), traj[i].getVal(0,0),
00560             dX,dY);
00561 
00562       drawDisk(trajImg, Point2D<int>(int(locX),int(locY)), 2, PixRGB<byte>(255,0,0));
00563       drawLine (trajImg,
00564                 Point2D<int>(int(locX),int(locY)),
00565                 Point2D<int>(int(locX + dX),int(locY + dY)),
00566                 PixRGB<byte>(255,255,255),1);
00567       locX = locX + dX;
00568       locY = locY + dY;
00569     }
00570 
00571   return trajImg;
00572 }
00573 
00574 // ######################################################################
00575 /* So things look consistent in everyone's emacs... */
00576 /* Local Variables: */
00577 /* indent-tabs-mode: nil */
00578 /* End: */