test-Gist-Sal-Nav.C

Go to the documentation of this file.
00001 /*!@file Gist/test-Gist-Sal-Nav.C navigation using a combination of saliency
00002   and gist. Input is either the camera or an MPEGStream */
00003 
00004 // //////////////////////////////////////////////////////////////////// //
00005 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00006 // University of Southern California (USC) and the iLab at USC.         //
00007 // See http://iLab.usc.edu for information about this project.          //
00008 // //////////////////////////////////////////////////////////////////// //
00009 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00010 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00011 // in Visual Environments, and Applications'' by Christof Koch and      //
00012 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00013 // pending; application number 09/912,225 filed July 23, 2001; see      //
00014 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00015 // //////////////////////////////////////////////////////////////////// //
00016 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00017 //                                                                      //
00018 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00019 // redistribute it and/or modify it under the terms of the GNU General  //
00020 // Public License as published by the Free Software Foundation; either  //
00021 // version 2 of the License, or (at your option) any later version.     //
00022 //                                                                      //
00023 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00024 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00025 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00026 // PURPOSE.  See the GNU General Public License for more details.       //
00027 //                                                                      //
00028 // You should have received a copy of the GNU General Public License    //
00029 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00030 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00031 // Boston, MA 02111-1307 USA.                                           //
00032 // //////////////////////////////////////////////////////////////////// //
00033 //
00034 // Primary maintainer for this file: Christian Siagian <siagian@usc.edu>
00035 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Gist/test-Gist-Sal-Nav.C $
00036 // $Id: test-Gist-Sal-Nav.C 14762 2011-05-03 01:13:16Z siagian $
00037 //
00038 ////////////////////////////////////////////////////////
00039 // test-Gist-Sal-Nav.C <input.mpg/CAMERA> <input_train.txt> [output_directory] [index]
00040 
00041 // This is an ongoing project for robotics navigation. Currently it is able to
00042 // recognize places through the use of gist features. It accepts an input video
00043 // clip <input.mpg> and a pre-trained neural network via a training file
00044 // <input_train.txt> - the same file is used in the training phase by train-FFN.C.
00045 
00046 // At the start, the component manager enables a standard GistEstimator and
00047 // then a neural net place recognizer is instantiated. In the main while
00048 // loop, at each time step the place recognizer hypothesizes the location
00049 // based on the gist features.
00050 
00051 // Later on we will incorporate saliency to get a better spatial resolution
00052 // as well as accuracy of a location.
00053 
00054 // Related files of interest: GistEstimator.C (and .H) and
00055 // GistEstimatorConfigurator.C (and .H) used by Brain.C to compute gist features.
00056 // test-Gist.C uses GistEstimator to extract gist features from a single image.
00057 
00058 #include "Channels/ChannelOpts.H"
00059 #include "Component/GlobalOpts.H"
00060 #include "Component/ModelManager.H"
00061 #include "Component/ModelOptionDef.H"
00062 #include "Component/OptionManager.H"
00063 #include "Devices/FrameGrabberConfigurator.H"
00064 #include "Devices/DeviceOpts.H"
00065 #include "GUI/XWinManaged.H"
00066 #include "Gist/FFN.H"
00067 #include "Gist/trainUtils.H"
00068 #include "Image/ColorOps.H"
00069 #include "Image/CutPaste.H"
00070 #include "Image/DrawOps.H"
00071 #include "Image/ImageCache.H"
00072 #include "Image/MathOps.H"
00073 #include "Image/MatrixOps.H"
00074 #include "Image/Pixels.H"
00075 #include "Image/Pixels.H"
00076 #include "Image/ShapeOps.H"
00077 #include "Image/Transforms.H"
00078 #include "Media/MPEGStream.H"
00079 #include "Media/MediaOpts.H"
00080 #include "Media/MediaSimEvents.H"
00081 #include "Neuro/GistEstimatorStd.H"
00082 #include "Neuro/GistEstimatorFFT.H"
00083 #include "Neuro/InferoTemporal.H"
00084 #include "Neuro/NeuroOpts.H"
00085 #include "Neuro/NeuroSimEvents.H"
00086 #include "Neuro/Retina.H"
00087 #include "Neuro/ShapeEstimator.H"
00088 #include "Neuro/ShapeEstimatorModes.H"
00089 #include "Neuro/SpatialMetrics.H"
00090 #include "Neuro/StdBrain.H"
00091 #include "Neuro/gistParams.H"
00092 #include "Raster/Raster.H"
00093 #include "SIFT/Histogram.H"
00094 #include "Transport/FrameIstream.H"
00095 #include "SIFT/Keypoint.H"
00096 #include "SIFT/VisualObject.H"
00097 #include "SIFT/VisualObjectDB.H"
00098 #include "Simulation/SimEventQueueConfigurator.H"
00099 #include "Util/Timer.H"
00100 
//! number of frames over which frame rate is computed
#define NAVG 20

#define W_ASPECT_RATIO  320 // ideal minimum width for display
#define H_ASPECT_RATIO  240 // ideal minimum height for display

// feed-forward network used to classify the place from the gist vector
rutz::shared_ptr<FeedForwardNetwork> ffn_place;
// PCA/ICA projection matrix used to reduce the gist feature dimension
Image<double> pcaIcaMatrix;

// display windows, created in setupDispWin()
CloseButtonListener wList;
XWinManaged *inputWin;  // raw input frame
XWinManaged *salWin;    // saliency-related visuals
XWinManaged *gistWin;   // gist-related visuals

// gist display geometry, computed in setupDispWin() from the frame size
int wDisp, hDisp, sDisp, scaleDisp;
int wDispWin,  hDispWin;

// gist display
int pcaW = 16, pcaH = 5;        // layout of the reduced (PCA) gist feature image
int winBarW = 5, winBarH = 25;  // presumably classifier-bar geometry; unused in this file

// ######################################################################
// forward declarations (definitions follow main below)
void                  setupDispWin     (int w, int h);
Image< PixRGB<byte> > getGistDispImg   (Image< PixRGB<byte> > img, Image<float> gistImg,
                                        Image<float> gistPcaImg, Image<float> outHistImg);
void                  processSalCue    (Image<PixRGB<byte> > inputImg,
                                        nub::soft_ref<StdBrain> brain, Point2D<int> winner, int fNum,
const Image<float>& semask, const std::string& selabel);
00129 // ######################################################################
00130 // Main function
00131 int main(const int argc, const char **argv)
00132 {
00133   MYLOGVERB = LOG_INFO;  // suppress debug messages
00134 
00135   // Instantiate a ModelManager:
00136   ModelManager manager("Place Localization Model");
00137 
00138   // we cannot use saveResults() on our various ModelComponent objects
00139   // here, so let's not export the related command-line options.
00140   manager.allowOptions(OPTEXP_ALL & (~OPTEXP_SAVE));
00141 
00142   // Instantiate our various ModelComponents:
00143   // either an MPEGStream
00144   nub::soft_ref<SimEventQueueConfigurator>
00145     seqc(new SimEventQueueConfigurator(manager));
00146   manager.addSubComponent(seqc);
00147 
00148   nub::soft_ref<InputMPEGStream>
00149     ims(new InputMPEGStream(manager, "Input MPEG Stream", "InputMPEGStream"));
00150   manager.addSubComponent(ims);
00151 
00152   // or a FrameGrabber
00153   nub::soft_ref<FrameGrabberConfigurator>
00154     gbc(new FrameGrabberConfigurator(manager));
00155   manager.addSubComponent(gbc);
00156 
00157   nub::soft_ref<StdBrain> brain(new StdBrain(manager));
00158   manager.addSubComponent(brain);
00159 
00160   nub::ref<SpatialMetrics> metrics(new SpatialMetrics(manager));
00161   manager.addSubComponent(metrics);
00162 
00163   manager.exportOptions(MC_RECURSE);
00164   metrics->setFOAradius(30); // FIXME
00165   metrics->setFoveaRadius(30); // FIXME
00166   manager.setOptionValString(&OPT_MaxNormType, "FancyOne");
00167   manager.setOptionValString(&OPT_UseRandom, "false");
00168   //  manager.setOptionValString("ShapeEstimatorMode","SaliencyMap");
00169   //  manager.setOptionValString(&OPT_ShapeEstimatorMode,"ConspicuityMap");
00170   manager.setOptionValString(&OPT_ShapeEstimatorMode, "FeatureMap");
00171   manager.setOptionValString(&OPT_ShapeEstimatorSmoothMethod, "Chamfer");
00172   //manager.setOptionValString(&OPT_ShapeEstimatorSmoothMethod, "Gaussian");
00173   manager.setOptionValString(&OPT_RawVisualCortexChans,"OIC");
00174   manager.setOptionValString(&OPT_IORtype, "Disc");
00175 
00176   // set up the GIST ESTIMATOR: Std or Fft: IN COMMAND LINE
00177   //manager.setOptionValString(&OPT_GistEstimatorType,"Std");
00178 
00179   // set up the INFEROTEMPORAL
00180   manager.setOptionValString(&OPT_InferoTemporalType,"Std");
00181   manager.setOptionValString(&OPT_AttentionObjRecog,"yes");
00182   manager.setOptionValString(&OPT_MatchObjects,"false");
00183   // Request a bunch of option aliases (shortcuts to lists of options):
00184   REQUEST_OPTIONALIAS_NEURO(manager);
00185 
00186   // frame grabber setup
00187   // NOTE: don't have to put the option --fg-type=1394
00188 //   manager.setOptionValString(&OPT_FrameGrabberType, "1394");
00189 //   manager.setOptionValString(&OPT_FrameGrabberDims, "160x120");
00190 //   manager.setOptionValString(&OPT_FrameGrabberMode, "YUV444");
00191 //   manager.setOptionValString(&OPT_FrameGrabberNbuf, "20");
00192 
00193   // Parse command-line:
00194   if (manager.parseCommandLine(argc, argv, "<input.mpg/CAMERA> <input_train.txt>"
00195                                "[output_directory] [index]",
00196                                2, 4) == false)
00197     return(1);
00198 
00199   nub::soft_ref<SimEventQueue> seq = seqc->getQ();
00200 
00201   // NOTE: this could now be controlled by a command-line option
00202   // --preload-mpeg=true
00203   manager.setOptionValString(&OPT_InputMPEGStreamPreload, "true");
00204 
00205   // do post-command-line configs:
00206   int w; int h;
00207   nub::soft_ref<FrameIstream> gb ;
00208   std::string camera("CAMERA");
00209 
00210   // compare returns zero if they are equal
00211   if(!manager.getExtraArg(0).compare(camera))
00212     {
00213       gb = gbc->getFrameGrabber();
00214       if (gb.isInvalid())
00215         LFATAL("You need to select a frame grabber type via the "
00216                "--fg-type=XX command-line option for this program "
00217                "to be useful -- ABORT");
00218       w = gb->getWidth(); h = gb->getHeight();
00219       std::string dims = convertToString(Dims(w, h));
00220       manager.setOptionValString(&OPT_InputFrameDims, dims);
00221       LINFO("Camera");
00222 
00223       // get the frame grabber to start streaming:
00224       gb->startStream();
00225     }
00226   else
00227     {
00228       ims->setFileName(manager.getExtraArg(0));
00229 
00230       Dims iDims = ims->peekDims();
00231       manager.setOptionValString(&OPT_InputFrameDims,
00232                                  convertToString(ims->peekDims()));
00233       // Added the sony cropping
00234       w = iDims.w() - 50 + 1; h = iDims.h();
00235       LINFO("Mpeg");
00236     }
00237 
00238   // setup  display  at the start of stream
00239   // NOTE: wDisp, hDisp, and sDisp are modified
00240   LINFO("Frame w: %d, h: %d",w, h);
00241   setupDispWin(w, h);
00242 
00243   // offset number for the saved images (for mpeg_encode)
00244   int fNumOffset = 0;
00245   if (manager.numExtraArgs() > 3)
00246       fNumOffset = manager.getExtraArgAs<int>(3);
00247 
00248   // frame delay in seconds
00249   double rtdelay = 33.3667/1000.0;    // real time
00250   double fdelay  = rtdelay*3;           // 3 times slower than real time
00251 
00252   // let's get all our ModelComponent instances started:
00253   manager.start();
00254 
00255   // get the GistEstimator
00256   LFATAL("FIXME");
00257   nub::soft_ref<GistEstimatorStd> ge;//////// =
00258   ////////    dynCastWeak<GistEstimatorStd>(brain->getGE());
00259 
00260   // main loop:
00261   SimTime prevstime = SimTime::ZERO(); int fNum = 0;
00262   Image< PixRGB<byte> > inputImg;
00263   Image< PixRGB<byte> > gistDispImg;
00264 
00265   // get place classifier parameters
00266   FFNtrainInfo pcInfo(manager.getExtraArg(1));
00267 
00268   // instantiate a 3-layer feed-forward network
00269   // initialize with the provided parameters
00270   ffn_place.reset(new FeedForwardNetwork());
00271   ffn_place->init3L(pcInfo.h1Name, pcInfo.h2Name, pcInfo.oName,
00272                     pcInfo.redFeatSize, pcInfo.h1size, pcInfo.h2size,
00273                     pcInfo.nOutput, 0.0, 0.0);
00274 
00275   // setup the PCA eigenvector
00276   pcaIcaMatrix = setupPcaIcaMatrix
00277     (pcInfo.trainFolder+pcInfo.evecFname,
00278      pcInfo.oriFeatSize, pcInfo.redFeatSize);
00279 
00280   // MAIN LOOP
00281   Timer tim(1000000); uint64 t[NAVG]; float frate = 0.0f;
00282   while(true)
00283   {
00284     // has the time come for a new frame?
00285     // LATER ON GIST WILL DECIDE IF WE WANT TO SLOW THINGS DOWN
00286     if (fNum == 0 ||
00287         (seq->now() - 0.5 * (prevstime - seq->now())).secs() - fNum * fdelay > fdelay)
00288       {
00289         tim.reset();
00290 
00291         // load or grab new frame
00292         if(!manager.getExtraArg(0).compare(camera))
00293           {
00294             inputImg = gb->readRGB();
00295               //Raster::ReadRGB("/lab/tmpi6/u/christian/beobotData/data_04_06_2006/test_011_000402.ppm");
00296           }
00297         else
00298           {
00299             inputImg = ims->readRGB();
00300 
00301             // take out frame borders NOTE: ONLY FOR SONY CAMCORDER
00302             inputImg = crop(inputImg, Rectangle::tlbrI(0, 25, h-1, 25 + w - 1));
00303           }
00304 
00305         if (inputImg.initialized() == false) break;  // end of input stream
00306 
00307         // pass input to brain:
00308         rutz::shared_ptr<SimEventInputFrame>
00309           e(new SimEventInputFrame(brain.get(), GenericFrame(inputImg), 0));
00310         seq->post(e); // post the image to the brain
00311         LINFO("new frame :%d\n",fNum);
00312 
00313         // if we don't have a GE then we have to skip Gist extraction
00314         if (!ge.isInvalid())
00315         {
00316           // get the gist feature vector
00317           // reduce feature dimension (if available)
00318           Image<double> cgist =  ge->getGist();
00319           Image<double> in = cgist;
00320           if(pcInfo.isPCA) in = matrixMult(pcaIcaMatrix, cgist);
00321 
00322           // analyze the gist features to recognize the place
00323           Image<double> out = ffn_place->run3L(in);
00324           rutz::shared_ptr<Histogram> resHist(new Histogram(pcInfo.nOutput));
00325 
00326           for(uint i = 0; i < pcInfo.nOutput; i++)
00327             {
00328               LINFO("pl[%3d]: %.4f",i, out.getVal(i));
00329               resHist->addValue(i, out.getVal(i));
00330             }
00331 
00332           // FIX FOR DISPLAY
00333           // // display or save the visuals
00334           // gistDispImg = getGistDispImg(inputImg,
00335           //                              ge->getGistImage(sDisp),
00336           //                              getPcaIcaFeatImage(in, pcaW, pcaH,sDisp*2),
00337           //                              resHist->getHistogramImage(wDisp,sDisp*2*pcaH, 0.0, 1.0));
00338 
00339           // if (manager.numExtraArgs() > 2)
00340           //   Raster::WriteRGB(gistDispImg, sformat("%s%07d.ppm", manager.getExtraArg(2).c_str(),
00341           //                                         fNum + fNumOffset));
00342           // else
00343           //   {
00344           //     inputWin->drawImage(inputImg,0,0);
00345           //     gistWin->drawImage(gistDispImg,0,0);
00346           //     //Raster::waitForKey();
00347           //   }
00348         }
00349         else
00350           LINFO("Cannot compute gist without a Gist Estimator");
00351 
00352         // compute and show framerate over the last NAVG frames:
00353         t[fNum % NAVG] = tim.get();
00354         if (fNum % 5 == 0)
00355           {
00356             uint64 avg = 0ULL; for (int i = 0; i < NAVG; i ++) avg += t[i];
00357             frate = 1000000.0F / float(avg) * float(NAVG);
00358             printf("[%6d] Frame rate: %f fps -> %f ms/frame \n",fNum,frate, 1000.0/frate);
00359           }
00360 
00361        // increment frame count
00362         fNum++;
00363       }
00364 
00365     // evolve brain:
00366     prevstime = seq->now(); // time before current step
00367     const SimStatus status = seq->evolve();
00368 
00369     // FIX THIS LATER
00370     // process if SALIENT location is found
00371     if (SeC<SimEventWTAwinner> e = seq->check<SimEventWTAwinner>(0))
00372       {
00373         // localize using the salient cue
00374         //const Point2D<int> winner = brain->getLastCovertPos();
00375         // use Shape estimator to focus on the attended region
00376         Image<float> fmask; std::string label;
00377         if (SeC<SimEventShapeEstimatorOutput>
00378             e = seq->check<SimEventShapeEstimatorOutput>(0))
00379           { fmask = e->smoothMask(); label = e->winningLabel(); }
00380         //processSalCue(inputImg, brain, winner, fNum, fmask, label);
00381       }
00382 
00383     if (SIM_BREAK == status) // Brain decided it's time to quit
00384       break;
00385   }
00386 
00387   //uint64 t = tim.get();
00388   //printf("It takes %.3fms to process %d frame = %.3f ms/frame\n",
00389   //       float(t)* 0.001F, fNum, float(t)/float(fNum)* 0.001F);
00390 
00391   // stop all our ModelComponents
00392   manager.stop();
00393 
00394   // all done!
00395   return 0;
00396 }
00397 
00398 // ######################################################################
00399 // process salient cues
00400 void processSalCue(Image<PixRGB<byte> > inputImg,
00401                    nub::soft_ref<StdBrain> brain, Point2D<int> winner, int fNum,
00402                    const Image<float>& semask, const std::string& selabel)
00403 {
00404   // use Shape estimator to focus on the attended region
00405   Image<float> roiImg;Image<PixRGB<byte> > objImg;
00406   if (semask.initialized())
00407     {
00408       float mn, mx; getMinMax(semask,mn,mx);
00409       Rectangle r = findBoundingRect(semask, mx*.05f);
00410       objImg = crop(inputImg, r);
00411       roiImg = semask * luminance(inputImg);
00412     }
00413   else
00414     {
00415       objImg = inputImg;
00416       roiImg = luminance(inputImg);
00417     }
00418 
00419   // we need a Visual Cortex to obtain a feature vector
00420   LFATAL("fixme");
00421   nub::soft_ref<VisualCortex> vc;///////// = brain->getVC();
00422   ///////  std::vector<float> fvec; vc->getFeatures(winner, fvec);
00423 
00424   //   SIFT key-point
00425   // create a new VisualObject. Since we give it no keypoints, they
00426   // will be automatically computed:
00427   //rutz::shared_ptr<VisualObject>
00428   //  obj(new VisualObject("NewObject", "NewObject", roiImg, fvec));
00429 
00430   // ----------------------------------------------
00431   // match the salient Region
00432 
00433   // WITH DATA BASE AND DESCRIPTION
00434 
00435   // ----------------------------------------------
00436 
00437   // draw the results
00438   drawCircle(roiImg, winner, 10, 0.0f, 1);
00439   drawPoint(roiImg, winner.i, winner.j, 0.0f);
00440   LINFO("\nFrame: %d, winner: (%d,%d) in %s\n\n",
00441         fNum, winner.i, winner.j, selabel.c_str());
00442   salWin->drawImage(roiImg,0,0);
00443   salWin->drawImage(objImg,inputImg.getWidth(),0);
00444   Raster::waitForKey();
00445 
00446 }
00447 
00448 // ######################################################################
00449 // setup display window for visualization purposes
00450 void setupDispWin(int w, int h)
00451 {
00452 
00453   inputWin = new XWinManaged(Dims(w, h), 2*w, 0, "Original Input Image" );
00454   wList.add(inputWin);
00455 
00456   // figure out the best display w, h, and scale for gist
00457 
00458   // check if both dimensions of the image
00459   // are much smaller than the desired resolution
00460   scaleDisp = 1;
00461   while (w*scaleDisp < W_ASPECT_RATIO*.75 && h*scaleDisp < H_ASPECT_RATIO*.75)
00462     scaleDisp++;
00463 
00464   // check if the height is longer aspect-ratio-wise
00465   // this is because the whole display is setup wrt/ to it
00466   wDisp = w*scaleDisp; hDisp = h*scaleDisp;
00467   if(wDisp/(0.0 + W_ASPECT_RATIO) > hDisp/(0.0 + H_ASPECT_RATIO))
00468     hDisp = (int)(wDisp / (0.0 + W_ASPECT_RATIO) * H_ASPECT_RATIO)+1;
00469   else
00470     wDisp = (int)(hDisp / (0.0 + H_ASPECT_RATIO) * W_ASPECT_RATIO)+1;
00471 
00472   // add slack so that the gist feature entry is square
00473   sDisp = (hDisp/NUM_GIST_FEAT + 1);
00474   hDisp =  sDisp * NUM_GIST_FEAT;
00475 
00476   // add space for all the visuals
00477   wDispWin = wDisp + sDisp * NUM_GIST_COL;
00478   hDispWin = hDisp + sDisp * pcaH * 2;
00479 
00480   gistWin  = new XWinManaged(Dims(wDispWin, hDispWin), 0, 0, "Gist Related");
00481   wList.add(gistWin);
00482 
00483   salWin   = new XWinManaged(Dims(2*w, h), 0, 2*h, "Saliency Related" );
00484   wList.add(salWin);
00485 }
00486 
00487 // ######################################################################
00488 // get display image for visualization purposes
00489 Image< PixRGB<byte> > getGistDispImg (Image< PixRGB<byte> > img,
00490                                       Image<float> gistImg,
00491                                       Image<float> gistPcaImg,
00492                                       Image<float> outHistImg)
00493 {
00494   Image< PixRGB<byte> > gistDispImg(wDispWin, hDispWin, ZEROS);
00495   int w = img.getWidth(); int h = img.getHeight();
00496 
00497   // grid the displayed input image
00498   Image< PixRGB<byte> > tImg = img;
00499   drawGrid(tImg, w/4,h/4,1,1,PixRGB<byte>(255,255,255));
00500   inplacePaste(gistDispImg, tImg,        Point2D<int>(0, 0));
00501 
00502   // display the gist features
00503   inplaceNormalize(gistImg, 0.0f, 255.0f);
00504   inplacePaste(gistDispImg, Image<PixRGB<byte> >(gistImg),    Point2D<int>(wDisp, 0));
00505 
00506   // display the PCA gist features
00507   inplaceNormalize(gistPcaImg, 0.0f, 255.0f);
00508   inplacePaste(gistDispImg, Image<PixRGB<byte> >(gistPcaImg), Point2D<int>(wDisp, hDisp));
00509 
00510   // display the classifier output histogram
00511   inplaceNormalize(outHistImg, 0.0f, 255.0f);
00512   inplacePaste(gistDispImg, Image<PixRGB<byte> >(outHistImg), Point2D<int>(0, hDisp));
00513 
00514   // draw lines delineating the information
00515   drawLine(gistDispImg, Point2D<int>(0,hDisp),
00516            Point2D<int>(wDispWin,hDisp),
00517            PixRGB<byte>(255,255,255),1);
00518   drawLine(gistDispImg, Point2D<int>(wDisp-1,0),
00519            Point2D<int>(wDisp-1,hDispWin-1),
00520            PixRGB<byte>(255,255,255),1);
00521   return gistDispImg;
00522 }
00523 
00524 // ######################################################################
00525 // canonical gray is (128, 128, 128)
00526 Image< PixRGB<byte> > greyWorldNormalize(Image< PixRGB<byte> > img)
00527 {
00528   Image<byte> rImg;
00529   Image<byte> gImg;
00530   Image<byte> bImg;
00531 //   getComponents(img, rImg, gImg, bImg);
00532 
00533   //int rMin, rMax, gMin, gMax, bMin, gMax;
00534   double rMean = mean(rImg);
00535   double gMean = mean(gImg);
00536   double bMean = mean(bImg);
00537   printf("mean = [%f,%f,%f]\n",rMean, gMean, bMean);
00538 
00539   Image<float> rtImg = (rImg * (128.0/rMean)) + .5;
00540   Image<float> gtImg = (gImg * (128.0/gMean)) + .5;
00541   Image<float> btImg = (bImg * (128.0/bMean)) + .5;
00542   inplaceClamp(rtImg, 0.0f,255.0f);
00543   inplaceClamp(gtImg, 0.0f,255.0f);
00544   inplaceClamp(btImg, 0.0f,255.0f);
00545 
00546   Image< PixRGB <byte> > res = makeRGB(rtImg, gtImg, btImg);
00547   return res;
00548 }
00549 
00550 // ######################################################################
00551 /* So things look consistent in everyone's emacs... */
00552 /* Local Variables: */
00553 /* indent-tabs-mode: nil */
00554 /* End: */
Generated on Sun May 8 08:40:39 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3