NeoBrain.C

00001 /*!@file Neuro/NeoBrain.C for the vss demos */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00005 // by the University of Southern California (USC) and the iLab at USC.  //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: Lior Elazary <elazary@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/NeoBrain.C $
00035 // $Id: NeoBrain.C 13716 2010-07-28 22:07:03Z itti $
00036 //
00037 
00038 #include "Neuro/NeoBrain.H"
00039 
00040 #include "GUI/DebugWin.H"
00041 #include "Component/ModelOptionDef.H"
00042 #include "Component/OptionManager.H" // for REQUEST_OPTIONALIAS_NEURO()
00043 #include "Image/MathOps.H" // for findMax()
00044 #include "Neuro/NeuroOpts.H"
00045 #include "rutz/error_context.h"
00046 #include "rutz/mutex.h"
00047 #include "rutz/sfmt.h"
00048 
00049 #include <algorithm>
00050 
00051 static const ModelOptionDef OPT_NeobrainSpeakSaliency =
00052   { MODOPT_FLAG, "NeobrainSpeakSaliency", &MOC_BRAIN, OPTEXP_CORE,
00053     "Whether to use speech to speak about saliency in NeoBrain",
00054     "neobrain-speak-saliency", '\0', "", "false" };
00055 
00056 static const ModelOptionDef OPT_NeobrainSpeakObjects =
00057   { MODOPT_FLAG, "NeobrainSpeakObjects", &MOC_BRAIN, OPTEXP_CORE,
00058     "Whether to use speech to speak about objects in NeoBrain",
00059     "neobrain-speak-objects", '\0', "", "false" };
00060 
00061 static const ModelOptionDef OPT_NeobrainSpeechFile =
00062   { MODOPT_ARG_STRING, "NeobrainSpeechFile", &MOC_BRAIN, OPTEXP_CORE,
00063     "Speech utterances for various speech tokens",
00064     "neobrain-speech-file", '\0', "<filename>", "etc/speech.pmap" };
00065 
00066 static const ModelOptionDef OPT_NeobrainBoringnessThresh =
00067   { MODOPT_ARG(int), "NeobrainBoringnessThresh", &MOC_BRAIN, OPTEXP_CORE,
00068     "Threshold for boringness beyond which we start a new track",
00069     "neobrain-boringness-thresh", '\0', "<int>", "80" };
00070 
00071 static const ModelOptionDef OPT_NeobrainTrackDelayFrames =
00072   { MODOPT_ARG(unsigned long), "NeobrainTrackDelayFrames", &MOC_BRAIN, OPTEXP_CORE,
00073     "Number of frames to wait after a shift before starting tracking",
00074     "neobrain-track-delay-frames", '\0', "<ulong>", "20" };
00075 
00076 static const ModelOptionDef OPT_NeobrainStopTrackDelayFrames =
00077   { MODOPT_ARG(int), "NeobrainStopTrackDelayFrames", &MOC_BRAIN, OPTEXP_CORE,
00078     "Number of frames to wait after deciding to stop tracking before\n"
00079     "tracking is actually disengaged. A value of -1 means never stop tracking.",
00080     "neobrain-stop-track-delay-frames", '\0', "<int>", "10" };
00081 
00082 static const ModelOptionDef OPT_TrackTarget =
00083   { MODOPT_FLAG, "TrackTarget", &MOC_BRAIN, OPTEXP_CORE,
00084     "Whether to start up in target tracking mode.",
00085     "track-target", '\0', ""
00086 #ifdef HAVE_OPENCV
00087     , "true"
00088 #else
00089     , "false" // we can't do tracking without OpenCV
00090 #endif
00091   };
00092 
00093 static const ModelOptionDef OPT_NeobrainKeepTracking =
00094   { MODOPT_FLAG, "NeobrainKeepTracking", &MOC_BRAIN, OPTEXP_CORE,
00095     "If this option is true, the brain will try to keep tracking the\n"
00096     "object for as long as possible",
00097     "neobrain-keeptracking", '\0', "", "false" };
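// The ModelOptionDef entries above become long-form command-line options of
// whatever INVT executable instantiates a NeoBrain. A rough, hypothetical
// invocation (the binary name is an assumption; the flag names and defaults
// are taken verbatim from the definitions above) might look like:
//
//   ./some-invt-app --track-target --neobrain-speak-objects \
//       --neobrain-speech-file=etc/speech.pmap \
//       --neobrain-boringness-thresh=80 --neobrain-stop-track-delay-frames=10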
00098 
00099 
00100 // ######################################################################
00101 NeoBrain::NeoBrain(OptionManager& mgr, const std::string& descrName,
00102                    const std::string& tagName)
00103   :
00104   ModelComponent(mgr, descrName, tagName),
00105   itsAllowTracking(&OPT_TrackTarget, this, ALLOW_ONLINE_CHANGES),
00106   itsKeepTracking(&OPT_NeobrainKeepTracking, this, ALLOW_ONLINE_CHANGES),
00107   itsUseHead("NeobrainUseHead", this, true, ALLOW_ONLINE_CHANGES),
00108   itsRelaxNeck("NeobrainRelaxNeck", this, true, ALLOW_ONLINE_CHANGES),
00109   itsSleeping("Sleeping", this, false, ALLOW_ONLINE_CHANGES),
00110   itsBoringnessThresh(&OPT_NeobrainBoringnessThresh, this, ALLOW_ONLINE_CHANGES),
00111   itsErrTolerance("NeobrainErrTolerance", this, 1, ALLOW_ONLINE_CHANGES),
00112   itsDistTolerance("NeobrainDistTolerance", this, 2, ALLOW_ONLINE_CHANGES),
00113   itsTrackDelayFrames(&OPT_NeobrainTrackDelayFrames, this,
00114                       ALLOW_ONLINE_CHANGES),
00115   itsBigErrFramesThresh("NeobrainBigErrFramesThresh", this, 500,
00116                         ALLOW_ONLINE_CHANGES),
00117   itsTargetFramesThresh("NeobrainTargetFramesThresh", this, 300,
00118                         ALLOW_ONLINE_CHANGES),
00119   itsNoMoveFramesThresh("NeobrainNoMoveFramesThresh", this, 1000,
00120                         ALLOW_ONLINE_CHANGES),
00121   itsStopTrackDelayFrames(&OPT_NeobrainStopTrackDelayFrames, this,
00122                           ALLOW_ONLINE_CHANGES),
00123   itsHeadInfoEyeTiltPos("HeadInfoEyeTiltPos", this, -1.00,
00124                           ALLOW_ONLINE_CHANGES),
00125   itsHeadInfoEyePanPos("HeadInfoEyePanPos", this, 0.20,
00126                           ALLOW_ONLINE_CHANGES),
00127   itsHeadInfoHeadPanPos("HeadInfoHeadPanPos", this, -1.00,
00128                           ALLOW_ONLINE_CHANGES),
00129   itsSpeakSaliency(&OPT_NeobrainSpeakSaliency, this, ALLOW_ONLINE_CHANGES),
00130   itsSpeakObjects(&OPT_NeobrainSpeakObjects, this, ALLOW_ONLINE_CHANGES),
00131   itsSpeechFile(&OPT_NeobrainSpeechFile, this),
00132   itsRefreshSpeechFile("NeobrainRefreshSpeechFile", this, false,
00133                        ALLOW_ONLINE_CHANGES),
00134   itsExcitementThresh("NeobrainExcitementThresh", this, 220.f,
00135                       ALLOW_ONLINE_CHANGES),
00136   itsTargetFrames(0),
00137   itsBigErrFrames(0),
00138   itsNoMoveFrames(0),
00139   itsStopFrames(0),
00140   itsHeadInfoFrames(0),
00141   itsPrevTargetX(-1.0f),
00142   itsPrevTargetY(-1.0f),
00143   itsBoringness(0.0f),
00144   itsBoringCount(0),
00145   itsExcitementLevel(0.0f),
00146   itsSleep(1000.0f),
00147   itsPrepSleep(0),
00148   itsAlmostSinging(false)
00149 {
00150   itsBeoHead = nub::soft_ref<BeoHead>(new BeoHead(mgr));
00151   addSubComponent(itsBeoHead);
00152 
00153   // Instantiate our various ModelComponents:
00154   itsSpeechSynth = nub::soft_ref<SpeechSynth>(new SpeechSynth(mgr));
00155   addSubComponent(itsSpeechSynth);
00156 
00157   if (0 != pthread_mutex_init(&itsSpeechTokenMapMutex, NULL))
00158     LFATAL("pthread_mutex_init() failed");
00159 
00160 #ifdef HAVE_OPENCV
00161   this->points[0] = NULL;
00162   this->points[1] = NULL;
00163   this->status = NULL;
00164   this->pyramid = NULL;
00165   this->prev_pyramid = NULL;
00166 #else
00167   // we can't do tracking without OpenCV, so no point in allowing the
00168   // user to try to turn it on:
00169   itsAllowTracking.setInactive(true);
00170 #endif
00171 }
00172 
00173 // ######################################################################
00174 NeoBrain::~NeoBrain()
00175 {
00176   if (0 != pthread_mutex_destroy(&itsSpeechTokenMapMutex))
00177     LERROR("pthread_mutex_destroy() failed");
00178 
00179 #ifdef HAVE_OPENCV
00180   //cvFree(&this->points[0]);
00181   //cvFree(&this->points[1]);
00182   //cvFree(&this->status);
00183   cvReleaseImage(&this->pyramid);
00184   cvReleaseImage(&this->prev_pyramid);
00185 #endif
00186 }
00187 
00188 // ######################################################################
00189 void NeoBrain::start2()
00190 {
00191   itsBeoHead->relaxNeck();
00192 
00193   //if (itsSpeakSaliency.getVal())
00194   if (!readSpeechFile(itsSpeechTokenMap, itsSpeechFile.getVal()))
00195     itsSpeakSaliency.setVal(false);
00196 
00197   if (itsSpeakSaliency.getVal())
00198     saveSpeechFile(itsSpeechTokenMap, "backup.pmap");
00199 }
00200 
00201 // ######################################################################
00202 void NeoBrain::init(Dims imageDims, int nPoints, int wz )
00203 {
00204   win_size = wz;
00205 
00206 #ifdef HAVE_OPENCV
00207   MAX_COUNT = nPoints;
00208   count = 0;
00209   points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
00210   points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
00211 
00212   prev_grey = Image<byte>(imageDims, ZEROS);
00213   pyramid = cvCreateImage( cvSize(imageDims.w(), imageDims.h()), 8, 1 );
00214   prev_pyramid = cvCreateImage( cvSize(imageDims.w(), imageDims.h()), 8, 1 );
00215   status = (char*)cvAlloc(MAX_COUNT);
00216 #endif
00217 
00218   flags = 0;
00219   itsState = CHECK_TARGET;
00220   itsImageDims = imageDims;
00221   itsTracking = false;
00222 
00223 //  if (itsSpeakSaliency.getVal())
00224   {
00225     itsSpeechSynth->sendCommand("(lex.add.entry '(\"bigpause\" n (((pau) 1) ((pau) 1) ((pau) 1) ((pau) 1))))\n", -10, true);
00226 
00227     itsSpeechSynth->sendCommand("(set! daisy (wave.load \"daisy.wav\"))",
00228         -10, true);
00229     itsSpeechSynth->sendCommand("(set! headinfo (wave.load \"headInfo.wav\"))",
00230         -10, true);
00231     itsSpeechSynth->sendCommand("(voice_cmu_us_rms_arctic_clunits)",
00232         -10, true);
00233   }
00234 }
00235 
00236 
00237 // ######################################################################
00238 bool NeoBrain::readSpeechFile(TokenMap& tokenMap,
00239                               const std::string& fname)
00240 {
00241   rutz::shared_ptr<ParamMap> pmap;
00242 
00243   try
00244     {
00245       pmap = ParamMap::loadPmapFile(fname);
00246     }
00247   catch (std::exception& e)
00248     {
00249       REPORT_CURRENT_EXCEPTION;
00250       return false;
00251     }
00252 
00253   ASSERT(pmap.is_valid());
00254 
00255   GVX_ERR_CONTEXT(rutz::sfmt("unpacking pmap file %s", fname.c_str()));
00256 
00257   tokenMap.clear();
00258 
00259   const int numTokenTypes = pmap->getIntParam("NUM_TOKEN_TYPES");
00260   for (int i = 0; i < numTokenTypes; ++i)
00261     {
00262       rutz::shared_ptr<ParamMap> submap =
00263         pmap->getSubpmap(sformat("TOKEN_TYPE_%d", i));
00264 
00265       GVX_ERR_CONTEXT(rutz::sfmt("unpacking TOKEN_TYPE_%d", i));
00266 
00267       const std::string name = submap->getStringParam("NAME");
00268 
00269       TokenType toktype;
00270 
00271       const int numTokens = submap->getIntParam("NUM_TOKENS");
00272       for (int k = 0; k < numTokens; ++k)
00273         {
00274           rutz::shared_ptr<ParamMap> subsubmap =
00275             submap->getSubpmap(sformat("TOKEN_%d", k));
00276 
00277           GVX_ERR_CONTEXT(rutz::sfmt("unpacking TOKEN_%d", k));
00278 
00279           SpeechToken tok;
00280           tok.low = subsubmap->getIntParam("LOW");
00281           tok.high = subsubmap->getIntParam("HIGH");
00282 
00283           const int numTextItems = subsubmap->getIntParam("NUM_TEXT_ITEMS");
00284 
00285           for (int j = 0; j < numTextItems; ++j)
00286             {
00287               tok.textList.push_back(subsubmap->getStringParam
00288                                      (sformat("TEXT_ITEM_%d", j)));
00289             }
00290 
00291           toktype.tokens.push_back(tok);
00292         }
00293 
00294       tokenMap[name] = toktype;
00295     }
00296 
00297   return true;
00298 }
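// Sketch of the parameter hierarchy readSpeechFile() expects in the speech
// pmap (logical structure only -- the concrete on-disk syntax is whatever
// ParamMap::loadPmapFile()/format() use; the sample text is an assumption):
//
//   NUM_TOKEN_TYPES = 1
//   TOKEN_TYPE_0:
//     NAME           = new_target
//     NUM_TOKENS     = 1
//     TOKEN_0:
//       LOW            = 0
//       HIGH           = 255
//       NUM_TEXT_ITEMS = 1
//       TEXT_ITEM_0    = I see something interesting
//
// The NAME field ("new_target", "object_intro", etc.) is the key that
// getToken()/sayToken() look up, and LOW/HIGH presumably bracket the value
// handed to getTextItemForVal().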
00299 
00300 // ######################################################################
00301 void NeoBrain::saveSpeechFile(const TokenMap& tokenMap,
00302                               const std::string& fname)
00303 {
00304   rutz::shared_ptr<ParamMap> pmap(new ParamMap);
00305 
00306   int numTokenTypes = 0;
00307   for (TokenMap::const_iterator
00308          itr = tokenMap.begin(), stop = tokenMap.end();
00309        itr != stop; ++itr)
00310     {
00311       rutz::shared_ptr<ParamMap> submap(new ParamMap);
00312 
00313       submap->putStringParam("NAME", (*itr).first);
00314 
00315       int numTokens = 0;
00316       for (size_t j = 0; j < (*itr).second.tokens.size(); ++j)
00317         {
00318           rutz::shared_ptr<ParamMap> subsubmap(new ParamMap);
00319 
00320           subsubmap->putIntParam("LOW", (*itr).second.tokens[j].low);
00321           subsubmap->putIntParam("HIGH", (*itr).second.tokens[j].high);
00322           int numTextItems = 0;
00323           for (size_t i = 0; i < (*itr).second.tokens[j].textList.size(); ++i)
00324             {
00325               subsubmap->putStringParam(sformat("TEXT_ITEM_%d", numTextItems),
00326                                         (*itr).second.tokens[j].textList[i]);
00327               ++numTextItems;
00328             }
00329           subsubmap->putIntParam("NUM_TEXT_ITEMS", numTextItems);
00330 
00331           submap->putSubpmap(sformat("TOKEN_%d", numTokens), subsubmap);
00332 
00333           ++numTokens;
00334         }
00335 
00336       submap->putIntParam("NUM_TOKENS", numTokens);
00337 
00338       pmap->putSubpmap(sformat("TOKEN_TYPE_%d", numTokenTypes), submap);
00339 
00340       ++numTokenTypes;
00341     }
00342   pmap->putIntParam("NUM_TOKEN_TYPES", numTokenTypes);
00343 
00344   pmap->format(fname);
00345 }
00346 
00347 // ######################################################################
00348 std::string NeoBrain::getToken(const std::string& token, int val) const
00349 {
00350 
00351   std::string result;
00352 
00353   {
00354     GVX_MUTEX_LOCK(&itsSpeechTokenMapMutex);
00355 
00356     TokenMap::const_iterator itr = itsSpeechTokenMap.find(token);
00357     if (itr == itsSpeechTokenMap.end())
00358     {
00359       LERROR("no such speech token: %s", token.c_str());
00360       TokenMap::const_iterator itr2;
00361       for(itr2=itsSpeechTokenMap.begin(); itr2 != itsSpeechTokenMap.end(); ++itr2)
00362       {
00363         std::string tmp = (*itr2).first; //.getTextItemForVal(2);
00364         LINFO("%s", tmp.c_str());
00365       }
00366     }
00367     else
00368       result = (*itr).second.getTextItemForVal(val);
00369   }
00370 
00371   return result;
00372 }
00373 
00374 // ######################################################################
00375 bool NeoBrain::sayToken(const std::string& token,
00376                         int val, int priority) const
00377 {
00378   const std::string text = this->getToken(token, val);
00379 
00380   if (!itsSpeakSaliency.getVal())
00381     return false;
00382 
00383   if (text.length() == 0)
00384     return false;
00385 
00386   return this->sayText(text, priority, false);
00387 }
00388 
00389 // ######################################################################
00390 bool NeoBrain::sayObjectLabel(const std::string& label,
00391                               int confidence, bool forceLabel)
00392 {
00393   if ( (label != "nomatch"
00394         && label != "none"
00395         && label != ""
00396         && label != itsLastSpokenLabel) ||
00397         forceLabel)
00398     {
00399       if (itsSpeakObjects.getVal())
00400       {
00401 
00402         const std::string intro = this->getToken("object_intro", confidence);
00403         if (!intro.empty()
00404             &&
00405             this->sayText(sformat("%s %s", intro.c_str(), label.c_str()),
00406               /* priority = */ -5,
00407               /* block = */ false))
00408         {
00409     //      setKeepTracking(false);
00410           itsLastSpokenLabel = label;
00411           return true;
00412         }
00413 
00414 //      if (itsSpeakObjects.getVal()
00415 //          &&
00416 //          itsSpeechSynth->playWavFile(label + ".wav",
00417 //                                      /* priority = */ -5,
00418 //                                      /* block = */ false))
00419 //        {
00420 //          setKeepTracking(false);
00421 //          itsLastSpokenLabel = label;
00422 //          return true;
00423 //        }
00424       }
00425     }
00426 
00427   itsLastSpokenLabel = "";
00428 
00429   return false;
00430 }
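// Hypothetical call site (the label and confidence value are assumptions):
//
//   neoBrain->sayObjectLabel("coffee mug", /* confidence = */ 85);
//
// This speaks "<object_intro text for 85> coffee mug" at most once per
// distinct label, and returns true only if the utterance was actually queued.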
00431 
00432 // ######################################################################
00433 void NeoBrain::setTarget(const Point2D<int> loc, const Image<byte>& grey,
00434                          const int saliencyval, bool changeState, bool forceNewLocation)
00435 {
00436   if (forceNewLocation)
00437     itsTracking = false;
00438 
00439   // Don't set the target if we are already tracking one
00440   if (!itsAllowTracking.getVal() || itsTracking)
00441     return;
00442 
00443 #ifdef HAVE_OPENCV
00444 
00445   count = MAX_COUNT;
00446 
00447   IplImage* tmp = img2ipl(grey);
00448   if (count > 1)
00449   {
00450     IplImage* eig = cvCreateImage(cvGetSize(tmp), 32, 1);
00451     IplImage* temp = cvCreateImage(cvGetSize(tmp), 32, 1);
00452     double quality = 0.01;
00453     double min_distance = 5;
00454 
00455     cvGoodFeaturesToTrack(tmp, eig, temp, points[1], &count,
00456         quality, min_distance, 0, 3, 0, 0.04);
00457     cvReleaseImage(&eig);
00458     cvReleaseImage(&temp);
00459 
00460   } else {
00461     //get from the saliency map
00462     points[1][0].x = loc.i;
00463     points[1][0].y = loc.j;
00464 
00465   }
00466   cvFindCornerSubPix(tmp, points[1], count,
00467       cvSize(win_size,win_size), cvSize(-1,-1),
00468       cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,
00469         20,0.03));
00470   cvReleaseImageHeader(&tmp);
00471 
00472   IplImage *swap_temp;
00473   prev_grey = grey;
00474   CV_SWAP( prev_pyramid, pyramid, swap_temp );
00475   CV_SWAP( points[0], points[1], swap_points );
00476 
00477 
00478   //LINFO("Init %i point (%f,%f)\n", count, points[1][0].x, points[1][0].y);
00479 
00480   if (changeState) {
00481     itsState = CHECK_TARGET;
00482   }
00483   itsTracking = true;
00484 #endif
00485 
00486   if (saliencyval >= 0)
00487     this->saySaliencyVal(byte(saliencyval));
00488 }
00489 
00490 // ######################################################################
00491 Point2D<int> NeoBrain::trackObject(const Image<byte>& grey)
00492 {
00493   itsRefreshSpeechFile.setVal(false);
00494 
00495   Point2D<int> targetLoc(-1,-1);
00496 
00497 #ifdef HAVE_OPENCV
00498   if (itsAllowTracking.getVal() && itsTracking)
00499   {
00500     if (count > 0)
00501     {
00502       IplImage* tmp1 = img2ipl(prev_grey);
00503       IplImage* tmp2 = img2ipl(grey);
00504 
00505       cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
00506                              points[0], points[1], count,
00507                              cvSize(win_size,win_size), 3, status, 0,
00508                              cvTermCriteria(CV_TERMCRIT_ITER
00509                                             |CV_TERMCRIT_EPS,
00510                                             20,0.03), flags);
00511 
00512       cvReleaseImageHeader(&tmp1);
00513       cvReleaseImageHeader(&tmp2);
00514 
00515       flags |= CV_LKFLOW_PYR_A_READY;
00516 
00517       //show track points
00518       int k, i;
00519       for(i = k = 0; i<count; i++)
00520       {
00521         if (!status[i])
00522           continue;
00523         points[1][k++] = points[1][i];
00524 
00525         targetLoc.i = std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x));
00526         targetLoc.j = std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y));
00527         ASSERT(grey.coordsOk(targetLoc));
00528       }
00529       count = k;
00530 
00531     }
00532 
00533     IplImage *swap_temp;
00534     CV_SWAP( prev_pyramid, pyramid, swap_temp );
00535     CV_SWAP( points[0], points[1], swap_points );
00536 
00537     moveHeadToTarget();
00538   }
00539   prev_grey = grey;
00540 #endif
00541 
00542   return targetLoc;
00543 }
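// Minimal usage sketch for the tracking interface (hypothetical caller code;
// "neoBrain", "winner", "salval" and the frame loop are assumptions, the
// method signatures are the ones defined in this file):
//
//   neoBrain->init(grey.getDims(), 1, 10);                  // 1 LK point, 10-pixel window
//   neoBrain->setTarget(winner, grey, salval, true, false); // seed tracker at salient point
//   while (grabbingFrames)
//   {
//     Point2D<int> loc = neoBrain->trackObject(grey);       // (-1,-1) when not tracking
//     // ... use loc, e.g. to draw the current track point ...
//   }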
00544 
00545 // ######################################################################
00546 std::vector<Point2D<int> > NeoBrain::getTrackersLoc(const Image<byte>& grey)
00547 {
00548   std::vector<Point2D<int> > trackersLoc;
00549 
00550 #ifdef HAVE_OPENCV
00551   if (itsAllowTracking.getVal() && itsTracking)
00552   {
00553     if (count > 0)
00554     {
00555       IplImage* tmp1 = img2ipl(prev_grey);
00556       IplImage* tmp2 = img2ipl(grey);
00557 
00558       cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
00559                              points[0], points[1], count,
00560                              cvSize(win_size,win_size), 3, status, 0,
00561                              cvTermCriteria(CV_TERMCRIT_ITER
00562                                             |CV_TERMCRIT_EPS,
00563                                             20,0.03), flags);
00564 
00565       cvReleaseImageHeader(&tmp1);
00566       cvReleaseImageHeader(&tmp2);
00567 
00568       flags |= CV_LKFLOW_PYR_A_READY;
00569 
00570       //show track points
00571       int k, i;
00572       for(i = k = 0; i<count; i++)
00573       {
00574         if (!status[i])
00575           continue;
00576         points[1][k++] = points[1][i];
00577 
00578         Point2D<int> tracker(std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x)),
00579             std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y)));
00580         trackersLoc.push_back(tracker);
00581       }
00582       count = k;
00583 
00584     }
00585 
00586     IplImage *swap_temp;
00587     CV_SWAP( prev_pyramid, pyramid, swap_temp );
00588     CV_SWAP( points[0], points[1], swap_points );
00589   }
00590   prev_grey = grey;
00591 #endif
00592 
00593   return trackersLoc;
00594 }
00595 
00596 
00597 // ######################################################################
00598 void NeoBrain::moveHeadToTarget()
00599 {
00600 #ifdef HAVE_OPENCV
00601   if (count > 0)
00602     {
00603 
00604       float targetX = points[1][0].x/itsImageDims.w();
00605       float targetY = points[1][0].y/itsImageDims.h();
00606 
00607       itsStats.bigerrframes        = itsBigErrFrames;
00608       itsStats.bigerrframes_thresh = itsBigErrFramesThresh.getVal();
00609       itsStats.targetframes        = itsTargetFrames;
00610       itsStats.targetframes_thresh = itsTargetFramesThresh.getVal();
00611       itsStats.nomoveframes        = itsNoMoveFrames;
00612       itsStats.nomoveframes_thresh = itsNoMoveFramesThresh.getVal();
00613       itsStats.stopframes          = itsStopFrames;
00614       itsStats.stopframes_thresh   = itsStopTrackDelayFrames.getVal();
00615 
00616 
00617       itsStats.leftEyePanPos   =  itsBeoHead->getLeftEyePanPos();
00618       itsStats.leftEyeTiltPos  =  itsBeoHead->getLeftEyeTiltPos();
00619       itsStats.rightEyePanPos  =  itsBeoHead->getRightEyePanPos();
00620       itsStats.rightEyeTiltPos =  itsBeoHead->getRightEyeTiltPos();
00621       itsStats.headPanPos      =  itsBeoHead->getHeadPanPos();
00622       itsStats.headTiltPos     =  itsBeoHead->getHeadTiltPos();
00623       itsStats.headYawPos      =  itsBeoHead->getHeadYawPos();
00624 
00625 
00626       switch(itsState)
00627         {
00628         case CHECK_TARGET:
00629           itsTargetFrames++;
00630           if (targetX > 1.0 || targetX < 0 ||
00631               targetY > 1.0 || targetY < 0)
00632             {
00633               enterCheckTargetState();
00634               itsTracking = false;
00635             }
00636           else if (itsTargetFrames > itsTrackDelayFrames.getVal())
00637             {
00638               itsPrevTargetX = targetX;
00639               itsPrevTargetY = targetY;
00640               itsState = TRACK_TARGET;
00641             }
00642           break;
00643 
00644         case TRACK_TARGET:
00645           itsTargetFrames++;
00646 
00647           if (!itsKeepTracking.getVal() &&
00648               itsStopFrames > 0)
00649             ++itsStopFrames;
00650 
00651           LDEBUG("itsBigErrFrames=%lu (thresh %lu), "
00652                  "itsTargetFrames=%lu (thresh %lu), "
00653                  "itsNoMoveFrames=%lu (thresh %lu), "
00654                  "itsStopFrames=%lu (thresh %i), "
00655                  "itsBoringness=%.2f (thresh %d)",
00656                  itsBigErrFrames, itsBigErrFramesThresh.getVal(),
00657                  itsTargetFrames, itsTargetFramesThresh.getVal(),
00658                  itsNoMoveFrames, itsNoMoveFramesThresh.getVal(),
00659                  itsStopFrames, itsStopTrackDelayFrames.getVal(),
00660                  itsBoringness, itsBoringnessThresh.getVal());
00661 
00662           if (itsStopTrackDelayFrames.getVal() != -1 &&
00663               (int)itsStopFrames >= itsStopTrackDelayFrames.getVal())
00664             {
00665               enterCheckTargetState();
00666               itsTracking = false;
00667             }
00668           else if (itsStopFrames == 0
00669               && itsBigErrFrames >= itsBigErrFramesThresh.getVal())
00670             {
00671               this->sayToken("notrack_target", 0, 10);
00672               ++itsStopFrames;
00673             }
00674           else if (!itsKeepTracking.getVal() &&
00675                    itsStopFrames == 0
00676                    && itsTargetFrames >= itsTargetFramesThresh.getVal())
00677             {
00678               this->sayToken("tiresome_target", 0, 10);
00679               ++itsStopFrames;
00680             }
00681           else if (!itsKeepTracking.getVal() &&
00682                    itsStopFrames == 0
00683                    && itsNoMoveFrames >= itsNoMoveFramesThresh.getVal())
00684             {
00685               this->sayToken("nomove_target", 0, 10);
00686               ++itsStopFrames;
00687             }
00688           else if (!itsKeepTracking.getVal() &&
00689                    itsStopFrames == 0
00690                    && itsBoringness >= itsBoringnessThresh.getVal())
00691             {
00692               this->sayToken("boring_target", 0, 10);
00693               ++itsStopFrames;
00694             }
00695           else
00696             {
00697               const float xerr =
00698                 itsUseHead.getVal()
00699                 ? fabs(0.5 - targetX)
00700                 : fabs(itsPrevTargetX - targetX);
00701 
00702               const float yerr =
00703                 itsUseHead.getVal()
00704                 ? fabs(0.5 - targetY)
00705                 : fabs(itsPrevTargetY - targetY);
00706 
00707               const float err =
00708                 itsUseHead.getVal()
00709                 ? itsBeoHead->trackTarget(0.5, 0.5, targetX, targetY)
00710                 : (xerr + yerr);
00711 
00712               const float errtol = 0.01f * itsErrTolerance.getVal();
00713               const float disttol = 0.01f * itsDistTolerance.getVal();
00714 
00715               if (err > errtol)
00716                 itsBigErrFrames++;
00717 
00718               if (err < errtol && xerr < disttol && yerr < disttol)
00719                 itsNoMoveFrames++;
00720               else
00721                 itsNoMoveFrames = 0;
00722 
00723               LDEBUG("usehead = %d, err = %f (%f+%f), target = %f,%f",
00724                      int(itsUseHead.getVal()),
00725                      err, xerr, yerr, targetX, targetY);
00726 
00727               itsStats.last_err = err;
00728               itsStats.last_xerr = xerr;
00729               itsStats.last_yerr = yerr;
00730               itsStats.err_tol = errtol;
00731               itsStats.dist_tol = disttol;
00732 
00733               itsPrevTargetX = targetX;
00734               itsPrevTargetY = targetY;
00735 
00736               itsState = TRACK_TARGET;
00737             }
00738 
00739             // get the head position; if it matches a predefined pose, utter a canned response
00740             if ( (itsBeoHead->getLeftEyeTiltPos() >= itsHeadInfoEyeTiltPos.getVal() - 0.01) &&
00741                  (itsBeoHead->getLeftEyeTiltPos() <= itsHeadInfoEyeTiltPos.getVal() + 0.01) &&
00742 
00743                  (itsBeoHead->getLeftEyePanPos() >= itsHeadInfoEyePanPos.getVal() - 0.1) &&
00744                  (itsBeoHead->getLeftEyePanPos() <= itsHeadInfoEyePanPos.getVal() + 0.1) &&
00745 
00746                  (itsBeoHead->getHeadPanPos() >= itsHeadInfoHeadPanPos.getVal() - 0.01) &&
00747                  (itsBeoHead->getHeadPanPos() <= itsHeadInfoHeadPanPos.getVal() + 0.01)  &&
00748                  itsHeadInfoFrames == 0)
00749             {
00750               if (itsSpeakSaliency.getVal())
00751                 itsSpeechSynth->sendCommand("(wave.play headinfo)", -10, false);
00752               itsHeadInfoFrames++;
00753             }
00754 
00755             if (itsHeadInfoFrames != 0) // don't increment the counter while it is at 0
00756             {
00757               if (itsHeadInfoFrames++ > 50)
00758                 itsHeadInfoFrames = 0;
00759             }
00760 
00761           break;
00762 
00763         default:
00764           break;
00765         }
00766 
00767     }
00768   else
00769     {
00770       enterCheckTargetState();
00771       itsTracking = false;
00772     }
00773 #endif
00774 }
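// In short, moveHeadToTarget() is a two-state machine: CHECK_TARGET rejects
// targets whose normalized coordinates fall outside [0,1]x[0,1] and otherwise
// waits itsTrackDelayFrames frames before switching to TRACK_TARGET;
// TRACK_TARGET servos the head toward the point every frame and schedules a
// stop (by bumping itsStopFrames) once the big-error, target-age, no-move or
// boringness threshold is exceeded, finally disengaging after
// itsStopTrackDelayFrames further frames (or never, if that option is -1).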
00775 
00776 // ######################################################################
00777 void NeoBrain::saySaliencyVal(byte val)
00778 {
00779   sayToken("new_target", val, 2);
00780 }
00781 
00782 // ######################################################################
00783 void NeoBrain::updateBoringness(const Image<byte>& salmap, byte foaval)
00784 {
00785   Point2D<int> truemaxpos;
00786   byte truemaxval;
00787   findMax(salmap, truemaxpos, truemaxval);
00788 
00789   const int val = int(foaval) - int(truemaxval);
00790   itsBoringness = (0.9f * itsBoringness) + (0.1f * -val);
00791 }
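// updateBoringness() thus keeps an exponential moving average (0.9 decay,
// 0.1 gain) of how far the attended value trails the true saliency-map
// maximum: e.g. a persistent deficit of 50 drives itsBoringness toward 50,
// and once it exceeds itsBoringnessThresh (default 80) the TRACK_TARGET state
// above gives up on the current target.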
00792 
00793 // ######################################################################
00794 void NeoBrain::updateExcitement(double vcxflicker)
00795 {
00796 
00797   itsExcitementLevel =
00798     0.99f * itsExcitementLevel
00799     + 0.007f * (255.0f - itsBoringness)
00800     + 0.003f * (255.0f * vcxflicker);
00801 
00802   if (itsSleeping.getVal())
00803           itsSleep = 0.95f * itsSleep;
00804   else
00805           itsSleep = 0.995f * itsSleep; //if we are not sleeping, then wait longer
00806 
00807   if (itsSleep > 1000) itsSleep = 1000;
00808   if (itsSleep < 1) itsSleep = 1;
00809   if ( (255.0f * vcxflicker > 200
00810         && (itsState == CHECK_TARGET && !itsPrepSleep))
00811        || itsSleeping.getVal()) // count flicker only while the head is idle (or we are asleep)
00812   {
00813           itsSleep += 0.2*(255.0f * vcxflicker);
00814   }
00815 
00816   // After going to sleep, wait a few frames before adding the motion back in;
00817   // this avoids reacting to motion generated by pausing the cameras.
00818   if (itsPrepSleep)
00819   {
00820           itsPrepSleep++;
00821           if (itsPrepSleep > 100)
00822           {
00823               itsPrepSleep = 0;
00824               itsSleep = 0;
00825           }
00826   }
00827 
00828 
00829   //go to sleep
00830   if (itsSleep <= 1 && !itsSleeping.getVal())
00831   {
00832       //itsPrepSleep = 1;
00833       //itsSleeping.setVal(true);
00834       //setUseHead(false);
00835       //sleep(2);
00836       //itsBeoHead->moveRestPos();
00837       //itsSleep = 0;
00838       //setRelaxNeck(true);
00839       //itsBeoHead->relaxHead();
00840       //this->sayText("Good night to all, and to all a good night.", 0, true);
00841       //itsSpeakSaliency.setVal(false);
00842       //sleep(2);
00843       //itsSleep = 0;
00844   }
00845 
00846   // wake up if we are sleeping and not preparing to sleep
00847   if (itsSleep > 200 && itsSleeping.getVal() && !itsPrepSleep)
00848   {
00849           itsPrepSleep = 0;
00850           itsSpeakSaliency.setVal(true);
00851           this->sayText("Good Morning to you.", 0, false);
00852           setRelaxNeck(false);
00853           sleep(2);
00854           setUseHead(true);
00855           itsSleep = 1000;
00856           itsSleeping.setVal(false);
00857 
00858   }
00859 
00860 
00861 
00862   if (itsExcitementLevel > itsExcitementThresh.getVal())
00863     {
00864       if (itsSpeakSaliency.getVal())
00865         itsSpeechSynth->sendCommand("(wave.play daisy)", -10, false);
00866       itsExcitementLevel = 0;
00867       itsAlmostSinging = false;
00868     }
00869   else if (itsExcitementLevel + 10.f > itsExcitementThresh.getVal())
00870     {
00871       if (!itsAlmostSinging)
00872         this->sayText("You have excited me. Any more excitement "
00873                       "and I will start to sing", 0, false);
00874 
00875       itsAlmostSinging = true;
00876     }
00877   else if (itsAlmostSinging)
00878     {
00879       this->sayText("Now you have stopped exciting me.", 0, false);
00880       itsAlmostSinging = false;
00881     }
00882 }
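// The excitement level is a leaky integrator with a 0.99 decay, so its steady
// state works out to 0.7*(255 - itsBoringness) + 0.3*255*vcxflicker; it can
// only reach the singing threshold (default 220) while the scene stays
// non-boring and full-field flicker remains high.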
00883 
00884 void NeoBrain::gotoSleep()
00885 {
00886         itsSleeping.setVal(true);
00887         setUseHead(false);
00888         sleep(2);
00889         itsBeoHead->moveRestPos();
00890         setRelaxNeck(true);
00891         itsBeoHead->relaxHead();
00892         this->sayText("Good night to all, and to all a good night.", 0, true);
00893         itsSpeakSaliency.setVal(false);
00894         sleep(2);
00895         itsSleep = 0;
00896 }
00897 
00898 void NeoBrain::wakeUp()
00899 {
00900         itsSpeakSaliency.setVal(true);
00901         this->sayText("Good Morning to you.", 0, false);
00902         setRelaxNeck(false);
00903         sleep(2);
00904         setUseHead(true);
00905         itsSleeping.setVal(false);
00906 }
00907 
00908 
00909 // ######################################################################
00910 float NeoBrain::getBoringness() const
00911 {
00912   return itsBoringness;
00913 }
00914 
00915 // ######################################################################
00916 float NeoBrain::getExcitementLevel() const
00917 {
00918   return itsExcitementLevel;
00919 }
00920 float NeoBrain::getSleepLevel() const
00921 {
00922         return itsSleep;
00923 }
00924 
00925 // ######################################################################
00926 bool NeoBrain::sayText(const std::string& text, int priority,
00927                        bool block) const
00928 {
00929  // if (itsSpeakSaliency.getVal())
00930   return itsSpeechSynth->sayText(text.c_str(), priority, block);
00931 
00932   // else...
00933   //   return false;
00934 }
00935 
00936 // ######################################################################
00937 void NeoBrain::paramChanged(ModelParamBase* const param,
00938                             const bool valueChanged,
00939                             ParamClient::ChangeStatus* status)
00940 {
00941   ModelComponent::paramChanged(param, valueChanged, status);
00942 
00943   if (param == &itsRelaxNeck)
00944     {
00945       if (itsRelaxNeck.getVal())
00946         itsBeoHead->relaxNeck();
00947       else
00948         itsBeoHead->moveRestPos();
00949     }
00950   else if (param == &itsRefreshSpeechFile
00951            && valueChanged == true
00952            && itsRefreshSpeechFile.getVal() == true)
00953     {
00954       try
00955         {
00956           TokenMap newmap;
00957           readSpeechFile(newmap, itsSpeechFile.getVal());
00958 
00959           {
00960             GVX_MUTEX_LOCK(&itsSpeechTokenMapMutex);
00961             itsSpeechTokenMap.swap(newmap);
00962           }
00963 
00964           LINFO("reloaded utterances from %s",
00965                 itsSpeechFile.getVal().c_str());
00966         }
00967       catch (...)
00968         {
00969           REPORT_CURRENT_EXCEPTION;
00970           *status = ParamClient::CHANGE_REJECTED;
00971         }
00972     }
00973 }
00974 
00975 // ######################################################################
00976 void NeoBrain::enterCheckTargetState()
00977 {
00978   itsStats.bigerrframes = 0;
00979   itsStats.targetframes = 0;
00980   itsStats.nomoveframes = 0;
00981   itsStats.stopframes = 0;
00982   itsStats.last_err = 0.0f;
00983   itsStats.last_xerr = 0.0f;
00984   itsStats.last_yerr = 0.0f;
00985 
00986   itsTargetFrames = 0;
00987   itsBigErrFrames = 0;
00988   itsNoMoveFrames = 0;
00989   itsStopFrames = 0;
00990   itsPrevTargetX = -1.0f;
00991   itsPrevTargetY = -1.0f;
00992   itsBoringness = 0.0f;
00993   itsLastSpokenLabel = "";
00994   itsState = CHECK_TARGET;
00995 }
00996 
00997 // ######################################################################
00998 /* So things look consistent in everyone's emacs... */
00999 /* Local Variables: */
01000 /* mode: c++ */
01001 /* indent-tabs-mode: nil */
01002 /* End: */