SimulationViewerEyeMvt2.C

/*!@file Neuro/SimulationViewerEyeMvt2.C comparison between saliency and
  human eye movements */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2003   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Laurent Itti <itti@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/SimulationViewerEyeMvt2.C $
// $Id: SimulationViewerEyeMvt2.C 14376 2011-01-11 02:44:34Z pez $
//

#include "Neuro/SimulationViewerEyeMvt2.H"

#include "Channels/ChannelBase.H"
#include "Channels/ChannelOpts.H"
#include "Component/OptionManager.H"
#include "Image/ColorOps.H"    // for contrastModulate()
#include "Image/CutPaste.H"    // for concatX()
#include "Image/DrawOps.H"     // for colGreyCombo()
#include "Image/MathOps.H"     // for takeMax()
#include "Image/ShapeOps.H"    // for rescale()
#include "Image/Transforms.H"  // for segmentObjectClean(), contour2D()
#include "Neuro/AttentionGuidanceMap.H"
#include "Neuro/Brain.H"
#include "Neuro/NeuroOpts.H"
#include "Neuro/NeuroSimEvents.H"
#include "Media/MediaSimEvents.H"
#include "Neuro/Retina.H"
#include "Neuro/SaccadeControllers.H"
#include "Neuro/EyeHeadController.H"
#include "Neuro/SaliencyMap.H"
#include "Neuro/SpatialMetrics.H"
#include "Neuro/TaskRelevanceMap.H"
#include "Neuro/VisualBuffer.H"
#include "Neuro/VisualCortex.H"
#include "Psycho/EyeData.H"
#include "Transport/FrameInfo.H"
#include "Transport/FrameOstream.H"

#include <stdio.h>
// ######################################################################
SimulationViewerEyeMvt2::
SimulationViewerEyeMvt2(OptionManager& mgr,
                        const std::string& descrName,
                        const std::string& tagName) :
  SimulationViewer(mgr, descrName, tagName),
  SIMCALLBACK_INIT(SimEventInputFrame),
  SIMCALLBACK_INIT(SimEventRetinaImage),
  SIMCALLBACK_INIT(SimEventWTAwinner),
  SIMCALLBACK_INIT(SimEventSaccadeStatusEye),
  SIMCALLBACK_INIT(SimEventSaveOutput),
  itsMetrics(new SpatialMetrics(mgr)),
  itsSaveMegaCombo(&OPT_SVEMsaveMegaCombo, this),
  itsColorEye("SVcolorHumanEye", this, PixRGB<byte>(128, 255, 255)),
  itsOutFname(&OPT_SVEMoutFname, this),
  itsLevelSpec(&OPT_LevelSpec, this),
  itsUseIOR(&OPT_SVEMuseIOR, this),
  itsTraj(), itsRawTraj(),
  itsOutFile(NULL), itsLastSample(-1, -1), itsLastRadius(0),
  itsIORmask(),
  itsDidAttentionShift(false)
{
  this->addSubComponent(itsMetrics);

  LINFO("NOTE: Selecting EyeHeadController of type EyeTrack");
  getManager().setOptionValString(&OPT_EyeHeadControllerType, "EyeTrack");
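  // (gaze will thus follow recorded human eye-tracking data rather than
  // the model's own saccade controller, since this viewer compares
  // model salience against human eye movements)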
}

// ######################################################################
SimulationViewerEyeMvt2::~SimulationViewerEyeMvt2()
{ }

// ######################################################################
void SimulationViewerEyeMvt2::start1()
{
  // abort if no output file:
  if (itsOutFname.getVal().empty()) LFATAL("I need an output file!");

  // open output file:
  itsOutFile = fopen(itsOutFname.getVal().c_str(), "w");
  if (itsOutFile == NULL) PLFATAL("Cannot write '%s'", itsOutFname.getVal().c_str());

  SimulationViewer::start1();
}

// ######################################################################
void SimulationViewerEyeMvt2::stop1()
{
  if (itsOutFile) { fclose(itsOutFile); itsOutFile = NULL; }

  SimulationViewer::stop1();
}

// ######################################################################
void SimulationViewerEyeMvt2::
onSimEventInputFrame(SimEventQueue& q, rutz::shared_ptr<SimEventInputFrame>& e)
{
  // reset our drawings:
  itsRawTraj = e->frame().asRgb();
}

// ######################################################################
void SimulationViewerEyeMvt2::
onSimEventRetinaImage(SimEventQueue& q, rutz::shared_ptr<SimEventRetinaImage>& e)
{
  // reset our drawings:
  itsTraj = e->frame().colorByte();
}

// ######################################################################
void SimulationViewerEyeMvt2::
onSimEventWTAwinner(SimEventQueue& q, rutz::shared_ptr<SimEventWTAwinner>& e)
{
  const WTAwinner& win = e->winner();

  // do some drawings:
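  // (bright green (0,255,0) for an interesting winner, darker green
  // (0,127,0) if the winner was flagged as boring):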
  drawCircle(itsTraj, win.p, itsMetrics->getFOAradius(), PixRGB<byte>(0, 127 + (win.boring?0:128), 0), 2);
  itsDidAttentionShift = true;
}

// ######################################################################
void SimulationViewerEyeMvt2::
onSimEventSaccadeStatusEye(SimEventQueue& q, rutz::shared_ptr<SimEventSaccadeStatusEye>& e)
{
  // grab the latest visual buffer:
  if (SeC<SimEventVisualBufferOutput> ebuf = q.check<SimEventVisualBufferOutput>(this, SEQ_ANY)) {
    Image<float> buf = ebuf->buffer();
    const int maplevel = ebuf->smlev();

    // also grab the latest retina output:
    if (SeC<SimEventRetinaImage> eret = q.check<SimEventRetinaImage>(this, SEQ_ANY)) {
      const Dims& dims = buf.getDims();
      const Point2D<int>& p = e->position();

      // select a drawing color & size:
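      // (the patch defaults to the eye color option (128,255,255); an
      // ongoing saccade turns it magenta (255,0,255), and an ongoing
      // blink additionally zeroes its blue channel)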
      PixRGB<byte> col(itsColorEye.getVal()); int psiz = 5;
      if (transientStatusIsOn(e->saccadeStatus())) { col.setGreen(0); col.setRed(255); }
      if (transientStatusIsOn(e->blinkStatus())) { col.setBlue(0); }
      PixRGB<byte> bk(0, 0, 0);
      drawPatch(itsTraj, p, psiz + 2, bk);
      drawPatch(itsTraj, p, psiz, col);
      Point2D<int> rawp = eret->retinalToRaw(p);
      drawPatch(itsRawTraj, rawp, psiz + 2, bk);
      drawPatch(itsRawTraj, rawp, psiz, col);

      // get the latest eye movement data:
      Point2D<int> nextTarg; double ampl = 0.0;
      if (SeC<SimEventEyeTrackerData> etrac = q.check<SimEventEyeTrackerData>(this, SEQ_ANY)) {
        rutz::shared_ptr<EyeData> data = etrac->data();
        if (data->hasSaccadeTargetData()) {
          nextTarg = data->saccadeTarget();
          ampl = data->saccadeAmplitude();
          data->saccadeDuration(); // duration is fetched but currently unused (see the commented-out fprintf() below)
        } else return;
      } else return;

      // get buffer-centered coords of our current eye position:
      Point2D<int> curr = ebuf->retinalToBuffer(p);

      // are we starting a saccade?
      if (ampl > 0.0)
        {
          // the coords we have from the SC are retinotopic; transform
          // into buffer-centered:
          itsLastSample = ebuf->retinalToBuffer(nextTarg);
          itsLastRadius = (itsMetrics->getFoveaRadius() + (1 << maplevel) - 1) >> maplevel;
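          // (a ceiling division converting the fovea radius from retinal
          // pixels to saliency-map pixels:
          //   ceil(r / 2^maplevel) == (r + (1 << maplevel) - 1) >> maplevel
          // e.g., for r = 32 and maplevel = 4, (32 + 15) >> 4 == 2)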

          // check that saccade target is okay:
          if (buf.coordsOk(itsLastSample) == false)
            {
              LERROR("Hum, saccade target at (%d, %d)? -- CLAMPING", itsLastSample.i, itsLastSample.j);
              itsLastSample.clampToDims(dims);
            }
          float mi, ma, av; getMinMaxAvg(buf, mi, ma, av);

          // compute the max of the buffer within a disk of radius
          // itsLastRadius around the saccade target location:
          Image<float> fov(dims, ZEROS); // note: allocated but never used below
          drawCircle(itsTraj, nextTarg, itsMetrics->getFoveaRadius(), PixRGB<byte>(255, 255, 0), 2);
          float salience = getLocalMax(buf, itsLastSample, itsLastRadius);
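          // (this is the model's predicted salience at the location the
          // human actually saccaded to; the commented-out fprintf() further
          // below was intended to log it alongside chance baselines)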

          if (itsUseIOR.getVal())
            {
              // the human may or may not have turned on IOR, we don't really
              // know. Here let's just estimate what would happen if we
              // trigger IOR at our current location. If it improves, we'll
              // assume it was done. Improvement means that our measured
              // salience at the target location does not change, but the
              // average (and possibly max) over the entire image will go down
              // since we have eliminated our potentially very salient current
              // location. We first cut off anything less salient than 1/4 the
              // max salience, to make sure the segmentation will not spread
              // too far.
              Image<float> buf2(buf);
              inplaceLowThresh(buf2, ma * 0.25F, 0.0F);
              if (buf2.getVal(curr) > ma * 0.05F)
                {
                  itsIORmask = segmentObjectClean(buf2, curr);
                  Image<float> iormaskf(itsIORmask);
                  inplaceNormalize(iormaskf, 0.0F, 1.0F);
                  buf2 = buf * binaryReverse(iormaskf, 1.0F);
                  float iorsal = getLocalMax(buf2, itsLastSample, itsLastRadius);

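                  // if inhibiting our current location leaves the salience
                  // measured at the target essentially unchanged (to within
                  // a tiny fraction of the map's max), assume the human
                  // did use IOR there: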
                  if (fabs(iorsal - salience) < ma * 1.0e-4F)
                    {
                      LINFO("### Triggering IOR in Visual Buffer ###");
                      /*
                      // update min/max/avg/salience computation
                      itsBuffer->inhibit(iormaskf);
                      buf = itsBuffer->getBuffer();
                      getMinMaxAvg(buf, mi, ma, av);
                      salience = getLocalMax(buf, itsLastSample, itsLastRadius);
                      */
                    }
                }
            }

          // get salience we would obtain with a random
          // saccade of random amplitude:
          Point2D<int> p(randomUpToNotIncluding(dims.w()), randomUpToNotIncluding(dims.h()));
          drawCircle(itsTraj, ebuf->bufferToRetinal(p), itsMetrics->getFoveaRadius(),
                     PixRGB<byte>(255, 255, 255), 2);
          /*
          float rndsal2 = getLocalMax(buf, p, itsLastRadius);
          float rnddist = p.distance(itsLastSample) * (1<<maplevel);

          // get location that our buffer would predict is the best target
          // for a saccade right now:

          p = itsBuffer->findMostInterestingTargetLocMax(curr);
          drawPatch(itsTraj, ebuf->bufferToRetinal(p), psiz + 2, bk);
          drawPatch(itsTraj, ebuf->bufferToRetinal(p), psiz, PixRGB<byte>(0, 0, 255));
          float dist = p.distance(itsLastSample) * (1<<maplevel);

          // save vbuf contents at eye:
          fprintf(itsOutFile, "%g %g %g %g  %g %g %g  %g %g\n",
                  salience, mi, ma, av, ampl, durs, rndsal2, dist, rnddist);
          fflush(itsOutFile);
          */
        }
    }
  }
}

// ######################################################################
Image< PixRGB<byte> > SimulationViewerEyeMvt2::getTraj(SimEventQueue& q)
{
  /*
  PixRGB<byte> bgcol(64, 128, 0); // background color
  PixRGB<byte> gridcol(255, 255, 255); // grid color

  Dims tdims = itsTraj.getDims();
  int maplevel = itsLevelSpec.getVal().mapLevel();
  Image<float> sm = getMap(q);
  Image< PixRGB<byte> > smc = toRGB(Image<byte>(rescaleOpt(sm, tdims, itsDisplayInterp.getVal())));

  // get the saliency mask (will show a blob depicting the area that
  // gets transferred from saliency map to buffer at each attention
  // shift):
  Image<byte> mask;
  if (itsBuffer->isObjectBased() && itsDidAttentionShift)
    mask = rescaleOpt(itsBuffer->getSaliencyMask(), tdims, itsDisplayInterp.getVal());
  else
    mask.resize(tdims, true);
  itsDidAttentionShift = false;

  // get the buffer:
  Image<float> buf = itsBuffer->getBuffer();
  inplaceNormalize(buf, 0.0F, 255.0F);
  Dims bdims(buf.getWidth() << maplevel, buf.getHeight() << maplevel);

  // get a color, full-scale version of the buffer:
  Image< PixRGB<byte> > bufc = toRGB(Image<byte>(rescaleOpt(buf, bdims, itsDisplayInterp.getVal())));

  // draw circle around saliency sample if we just took one:
  if (itsLastRadius != 0)
    {
      Point2D<int> p(itsLastSample); p.i <<= maplevel; p.j <<= maplevel;
      int r(itsLastRadius); r <<= maplevel;
      drawCircle(bufc, p, r, PixRGB<byte>(255, 255, 0), 2);
      drawCircle(smc, itsBuffer->bufferToRetinal(p), r, PixRGB<byte>(255, 255, 0), 2);
      itsLastRadius = 0;
    }

  // draw IOR contours if any:
  if (itsIORmask.initialized())
    {
      Image<byte> contou = contour2D(rescale(itsIORmask, bdims));

      Point2D<int> ppp;
      for (ppp.j = 0; ppp.j < bdims.h(); ppp.j ++)
        for (ppp.i = 0; ppp.i < bdims.w(); ppp.i ++)
          if (contou.getVal(ppp.i, ppp.j))
            drawDisk(bufc, ppp, 2, PixRGB<byte>(255, 255, 0));
      itsIORmask.freeMem();
    }

  // return mega combo; we have 4 panels of the dims of the blown-up
  // buffer (bdims), and we will paste images of various dims in those
  // panels:
  Image< PixRGB<byte> > ret;

  // start with the unshifted raw input:
  Image< PixRGB<byte> > rawinp(bdims, NO_INIT); rawinp.clear(bgcol);
  Point2D<int> rawinpoff((rawinp.getWidth()-itsRawTraj.getWidth())/2, (rawinp.getHeight()-itsRawTraj.getHeight())/2);
  inplacePaste(rawinp, itsRawTraj, rawinpoff);

  // now the foveal/shifted image:
  Image< PixRGB<byte> > rtraj(bdims, NO_INIT); rtraj.clear(bgcol);
  Point2D<int> rtrajoff((rtraj.getWidth() - itsTraj.getWidth())/2, (rtraj.getHeight() - itsTraj.getHeight())/2);
  inplacePaste(rtraj, itsTraj, rtrajoff);

  // now the saliency map:
  Image< PixRGB<byte> > smc2(bdims, NO_INIT); smc2.clear(bgcol);
  inplacePaste(smc2, smc, rtrajoff);

  // ready for action:
  ret = concatY(concatX(rawinp, rtraj), concatX(bufc, smc2));

  // draw separating borders:
  drawLine(ret, Point2D<int>(0, bdims.h()), Point2D<int>(bdims.w()*2-1, bdims.h()), gridcol, 1);
  drawLine(ret, Point2D<int>(bdims.w(), 0), Point2D<int>(bdims.w(), bdims.h()*2-1), gridcol, 1);

  return ret;

  */
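  // the mega-combo rendering above is disabled (it references an itsBuffer
  // member that appears to predate getting the buffer via
  // SimEventVisualBufferOutput); just return an empty image: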
  return Image< PixRGB<byte> >();
}

// ######################################################################
void SimulationViewerEyeMvt2::
onSimEventSaveOutput(SimEventQueue& q, rutz::shared_ptr<SimEventSaveOutput>& e)
{
  // update the trajectory:
  Image< PixRGB<byte> > res = getTraj(q);

  // save results?
  if (itsSaveMegaCombo.getVal())
    {
      // get the OFS to save to, assuming sinfo is of type
      // SimModuleSaveInfo (will throw a fatal exception otherwise):
      nub::ref<FrameOstream> ofs = dynamic_cast<const SimModuleSaveInfo&>(e->sinfo()).ofs;

      ofs->writeRGB(res, "T", FrameInfo("SimulationViewerEyeMvt2 trajectory", SRC_POS));
    }
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */