00001 /*!@file Neuro/NeoBrainVss.C for the vss demos */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00005 // by the University of Southern California (USC) and the iLab at USC.  //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: Lior Elazary <elazary@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/NeoBrainVss.C $
00035 // $Id: NeoBrainVss.C 10794 2009-02-08 06:21:09Z itti $
00036 //
00037 
00038 #ifndef NEURO_NEOBRAIN_C_DEFINED
00039 #define NEURO_NEOBRAIN_C_DEFINED
00040 
00041 #include "Neuro/NeoBrain.H"
00042 
00043 #include "GUI/DebugWin.H"
00044 #include "Component/ModelOptionDef.H"
00045 #include "Component/OptionManager.H" // for REQUEST_OPTIONALIAS_NEURO()
00046 #include "Image/MathOps.H" // for findMax()
00047 #include "Neuro/NeuroOpts.H"
00048 #include "rutz/error_context.h"
00049 #include "rutz/mutex.h"
00050 #include "rutz/sfmt.h"
00051 
00052 #include <algorithm>
00053 
00054 static const ModelOptionDef OPT_NeobrainSpeakSaliency =
00055   { MODOPT_FLAG, "NeobrainSpeakSaliency", &MOC_BRAIN, OPTEXP_CORE,
00056     "Whether to use speech to speak about saliency in NeoBrain",
00057     "neobrain-speak-saliency", '\0', "", "false" };
00058 
00059 static const ModelOptionDef OPT_NeobrainSpeakObjects =
00060   { MODOPT_FLAG, "NeobrainSpeakObjects", &MOC_BRAIN, OPTEXP_CORE,
00061     "Whether to use speech to speak about objects in NeoBrain",
00062     "neobrain-speak-objects", '\0', "", "false" };
00063 
00064 static const ModelOptionDef OPT_NeobrainSpeechFile =
00065   { MODOPT_ARG_STRING, "NeobrainSpeechFile", &MOC_BRAIN, OPTEXP_CORE,
00066     "Speech utterances for various speech tokens",
00067     "neobrain-speech-file", '\0', "<filename>", "etc/speech.pmap" };
00068 
00069 static const ModelOptionDef OPT_NeobrainBoringnessThresh =
00070   { MODOPT_ARG(int), "NeobrainBoringnessThresh", &MOC_BRAIN, OPTEXP_CORE,
00071     "Threshold for boringness beyond which we start a new track",
00072     "neobrain-boringness-thresh", '\0', "<int>", "80" };
00073 
00074 static const ModelOptionDef OPT_NeobrainTrackDelayFrames =
00075   { MODOPT_ARG(unsigned long), "NeobrainTrackDelayFrames", &MOC_BRAIN, OPTEXP_CORE,
00076     "Number of frames to wait after a shift before starting tracking",
00077     "neobrain-track-delay-frames", '\0', "<ulong>", "20" };
00078 
00079 static const ModelOptionDef OPT_NeobrainStopTrackDelayFrames =
00080   { MODOPT_ARG(int), "NeobrainStopTrackDelayFrames", &MOC_BRAIN, OPTEXP_CORE,
00081     "Number of frames to wait after deciding to stop tracking before\n"
00082     "the tracking is actually disengaged. A value of -1 will never stop. ",
00083     "neobrain-stop-track-delay-frames", '\0', "<int>", "10" };
00084 
00085 static const ModelOptionDef OPT_TrackTarget =
00086   { MODOPT_FLAG, "TrackTarget", &MOC_BRAIN, OPTEXP_CORE,
00087     "Whether to start up in target tracking mode.",
00088     "track-target", '\0', ""
00089 #ifdef HAVE_OPENCV
00090     , "true"
00091 #else
00092     , "false" // we can't do tracking without OpenCV
00093 #endif
00094   };
00095 
00096 static const ModelOptionDef OPT_NeobrainKeepTracking =
00097   { MODOPT_FLAG, "NeobrainKeepTracking", &MOC_BRAIN, OPTEXP_CORE,
00098     "If this option is true, the the brain will try to keep tracking the\n"
00099     "object for as long as posible",
00100     "neobrain-keeptracking", '\0', "", "false" };
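      // Example usage (a sketch only; the option names come from the definitions
      // above, while the usual --<long-option>[=<value>] command-line syntax for
      // model options and the demo executable name are assumptions):
      //   ./neobrain-demo --track-target --neobrain-speak-saliency \
      //                   --neobrain-speak-objects --neobrain-boringness-thresh=100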
00101 
00102 
00103 // ######################################################################
00104 NeoBrain::NeoBrain(OptionManager& mgr, const std::string& descrName,
00105                    const std::string& tagName)
00106   :
00107   ModelComponent(mgr, descrName, tagName),
00108   itsAllowTracking(&OPT_TrackTarget, this, ALLOW_ONLINE_CHANGES),
00109   itsKeepTracking(&OPT_NeobrainKeepTracking, this, ALLOW_ONLINE_CHANGES),
00110   itsUseHead("NeobrainUseHead", this, true, ALLOW_ONLINE_CHANGES),
00111   itsRelaxNeck("NeobrainRelaxNeck", this, true, ALLOW_ONLINE_CHANGES),
00112   itsSleeping("Sleeping", this, false, ALLOW_ONLINE_CHANGES),
00113   itsBoringnessThresh(&OPT_NeobrainBoringnessThresh, this, ALLOW_ONLINE_CHANGES),
00114   itsErrTolerance("NeobrainErrTolerance", this, 1, ALLOW_ONLINE_CHANGES),
00115   itsDistTolerance("NeobrainDistTolerance", this, 2, ALLOW_ONLINE_CHANGES),
00116   itsTrackDelayFrames(&OPT_NeobrainTrackDelayFrames, this,
00117                       ALLOW_ONLINE_CHANGES),
00118   itsBigErrFramesThresh("NeobrainBigErrFramesThresh", this, 500,
00119                         ALLOW_ONLINE_CHANGES),
00120   itsTargetFramesThresh("NeobrainTargetFramesThresh", this, 300,
00121                         ALLOW_ONLINE_CHANGES),
00122   itsNoMoveFramesThresh("NeobrainNoMoveFramesThresh", this, 1000,
00123                         ALLOW_ONLINE_CHANGES),
00124   itsStopTrackDelayFrames(&OPT_NeobrainStopTrackDelayFrames, this,
00125                           ALLOW_ONLINE_CHANGES),
00126   itsHeadInfoEyeTiltPos("HeadInfoEyeTiltPos", this, -1.00,
00127                           ALLOW_ONLINE_CHANGES),
00128   itsHeadInfoEyePanPos("HeadInfoEyePanPos", this, 0.20,
00129                           ALLOW_ONLINE_CHANGES),
00130   itsHeadInfoHeadPanPos("HeadInfoHeadPanPos", this, -1.00,
00131                           ALLOW_ONLINE_CHANGES),
00132   itsSpeakSaliency(&OPT_NeobrainSpeakSaliency, this, ALLOW_ONLINE_CHANGES),
00133   itsSpeakObjects(&OPT_NeobrainSpeakObjects, this, ALLOW_ONLINE_CHANGES),
00134   itsSpeechFile(&OPT_NeobrainSpeechFile, this),
00135   itsRefreshSpeechFile("NeobrainRefreshSpeechFile", this, false,
00136                        ALLOW_ONLINE_CHANGES),
00137   itsExcitementThresh("NeobrainExcitementThresh", this, 220.f,
00138                       ALLOW_ONLINE_CHANGES),
00139   itsTargetFrames(0),
00140   itsBigErrFrames(0),
00141   itsNoMoveFrames(0),
00142   itsStopFrames(0),
00143   itsHeadInfoFrames(0),
00144   itsPrevTargetX(-1.0f),
00145   itsPrevTargetY(-1.0f),
00146   itsBoringness(0.0f),
00147   itsBoringCount(0),
00148   itsExcitementLevel(0.0f),
00149   itsSleep(1000.0f),
00150   itsPrepSleep(0),
00151   itsAlmostSinging(false)
00152 {
00153   itsBeoHead = nub::soft_ref<BeoHead>(new BeoHead(mgr));
00154   addSubComponent(itsBeoHead);
00155 
00156   // Instantiate our various ModelComponents:
00157   itsSpeechSynth = nub::soft_ref<SpeechSynth>(new SpeechSynth(mgr));
00158   addSubComponent(itsSpeechSynth);
00159 
00160   if (0 != pthread_mutex_init(&itsSpeechTokenMapMutex, NULL))
00161     LFATAL("pthread_mutex_init() failed");
00162 
00163 #ifdef HAVE_OPENCV
00164   this->points[0] = NULL;
00165   this->points[1] = NULL;
00166   this->status = NULL;
00167   this->pyramid = NULL;
00168   this->prev_pyramid = NULL;
00169 #else
00170   // we can't do tracking without OpenCV, so no point in allowing the
00171   // user to try to turn it on:
00172   itsAllowTracking.setInactive(true);
00173 #endif
00174 }
00175 
00176 // ######################################################################
00177 NeoBrain::~NeoBrain()
00178 {
00179   if (0 != pthread_mutex_destroy(&itsSpeechTokenMapMutex))
00180     LERROR("pthread_mutex_destroy() failed");
00181 
00182 #ifdef HAVE_OPENCV
00183   //cvFree(&this->points[0]);
00184   //cvFree(&this->points[1]);
00185   //cvFree(&this->status);
00186   cvReleaseImage(&this->pyramid);
00187   cvReleaseImage(&this->prev_pyramid);
00188 #endif
00189 }
00190 
00191 // ######################################################################
00192 void NeoBrain::start2()
00193 {
00194   itsBeoHead->relaxNeck();
00195 
00196   if (itsSpeakSaliency.getVal())
00197     if (!readSpeechFile(itsSpeechTokenMap, itsSpeechFile.getVal()))
00198       itsSpeakSaliency.setVal(false);
00199 
00200   if (itsSpeakSaliency.getVal())
00201     saveSpeechFile(itsSpeechTokenMap, "backup.pmap");
00202 }
00203 
00204 // ######################################################################
00205 void NeoBrain::init(Dims imageDims, int nPoints, int wz )
00206 {
00207   win_size = wz;
00208 
00209 #ifdef HAVE_OPENCV
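        // Buffers for the OpenCV Lucas-Kanade tracker: two point arrays
        // (features in the previous and current frame), a per-point status
        // array, and two pyramid images reused across calls to
        // cvCalcOpticalFlowPyrLK().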
00210   MAX_COUNT = nPoints;
00211   count = 0;
00212   points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
00213   points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
00214 
00215   prev_grey = Image<byte>(imageDims, ZEROS);
00216   pyramid = cvCreateImage( cvSize(imageDims.w(), imageDims.h()), 8, 1 );
00217   prev_pyramid = cvCreateImage( cvSize(imageDims.w(), imageDims.h()), 8, 1 );
00218   status = (char*)cvAlloc(MAX_COUNT);
00219 #endif
00220 
00221   flags = 0;
00222   itsState = CHECK_TARGET;
00223   itsImageDims = imageDims;
00224   itsTracking = false;
00225 
00226   if (itsSpeakSaliency.getVal())
00227   {
00228     itsSpeechSynth->sendCommand("(lex.add.entry '(\"bigpause\" n (((pau) 1) ((pau) 1) ((pau) 1) ((pau) 1))))\n", -10, true);
00229 
00230     itsSpeechSynth->sendCommand("(set! daisy (wave.load \"daisy.wav\"))",
00231         -10, true);
00232     itsSpeechSynth->sendCommand("(set! headinfo (wave.load \"headInfo.wav\"))",
00233         -10, true);
00234   }
00235 }
00236 
00237 
00238 // ######################################################################
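      // Expected logical layout of the speech file (reconstructed from the keys
      // queried below; the concrete on-disk syntax is whatever ParamMap reads
      // and writes, so this is only the nesting, not exact file syntax):
      //
      //   NUM_TOKEN_TYPES = N
      //   TOKEN_TYPE_i:            (for i = 0 .. N-1)
      //     NAME        = token name, e.g. "new_target"
      //     NUM_TOKENS  = M
      //     TOKEN_k:               (for k = 0 .. M-1)
      //       LOW, HIGH           = value range this token covers
      //       NUM_TEXT_ITEMS      = K
      //       TEXT_ITEM_j         = candidate utterance (j = 0 .. K-1)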
00239 bool NeoBrain::readSpeechFile(TokenMap& tokenMap,
00240                               const std::string& fname)
00241 {
00242   rutz::shared_ptr<ParamMap> pmap;
00243 
00244   try
00245     {
00246       pmap = ParamMap::loadPmapFile(fname);
00247     }
00248   catch (std::exception& e)
00249     {
00250       REPORT_CURRENT_EXCEPTION;
00251       return false;
00252     }
00253 
00254   ASSERT(pmap.is_valid());
00255 
00256   GVX_ERR_CONTEXT(rutz::sfmt("unpacking pmap file %s", fname.c_str()));
00257 
00258   tokenMap.clear();
00259 
00260   const int numTokenTypes = pmap->getIntParam("NUM_TOKEN_TYPES");
00261   for (int i = 0; i < numTokenTypes; ++i)
00262     {
00263       rutz::shared_ptr<ParamMap> submap =
00264         pmap->getSubpmap(sformat("TOKEN_TYPE_%d", i));
00265 
00266       GVX_ERR_CONTEXT(rutz::sfmt("unpacking TOKEN_TYPE_%d", i));
00267 
00268       const std::string name = submap->getStringParam("NAME");
00269 
00270       TokenType toktype;
00271 
00272       const int numTokens = submap->getIntParam("NUM_TOKENS");
00273       for (int k = 0; k < numTokens; ++k)
00274         {
00275           rutz::shared_ptr<ParamMap> subsubmap =
00276             submap->getSubpmap(sformat("TOKEN_%d", k));
00277 
00278           GVX_ERR_CONTEXT(rutz::sfmt("unpacking TOKEN_%d", k));
00279 
00280           SpeechToken tok;
00281           tok.low = subsubmap->getIntParam("LOW");
00282           tok.high = subsubmap->getIntParam("HIGH");
00283 
00284           const int numTextItems = subsubmap->getIntParam("NUM_TEXT_ITEMS");
00285 
00286           for (int j = 0; j < numTextItems; ++j)
00287             {
00288               tok.textList.push_back(subsubmap->getStringParam
00289                                      (sformat("TEXT_ITEM_%d", j)));
00290             }
00291 
00292           toktype.tokens.push_back(tok);
00293         }
00294 
00295       tokenMap[name] = toktype;
00296     }
00297 
00298   return true;
00299 }
00300 
00301 // ######################################################################
00302 void NeoBrain::saveSpeechFile(const TokenMap& tokenMap,
00303                               const std::string& fname)
00304 {
00305   rutz::shared_ptr<ParamMap> pmap(new ParamMap);
00306 
00307   int numTokenTypes = 0;
00308   for (TokenMap::const_iterator
00309          itr = tokenMap.begin(), stop = tokenMap.end();
00310        itr != stop; ++itr)
00311     {
00312       rutz::shared_ptr<ParamMap> submap(new ParamMap);
00313 
00314       submap->putStringParam("NAME", (*itr).first);
00315 
00316       int numTokens = 0;
00317       for (size_t j = 0; j < (*itr).second.tokens.size(); ++j)
00318         {
00319           rutz::shared_ptr<ParamMap> subsubmap(new ParamMap);
00320 
00321           subsubmap->putIntParam("LOW", (*itr).second.tokens[j].low);
00322           subsubmap->putIntParam("HIGH", (*itr).second.tokens[j].high);
00323           int numTextItems = 0;
00324           for (size_t i = 0; i < (*itr).second.tokens[j].textList.size(); ++i)
00325             {
00326               subsubmap->putStringParam(sformat("TEXT_ITEM_%d", numTextItems),
00327                                         (*itr).second.tokens[j].textList[i]);
00328               ++numTextItems;
00329             }
00330           subsubmap->putIntParam("NUM_TEXT_ITEMS", numTextItems);
00331 
00332           submap->putSubpmap(sformat("TOKEN_%d", numTokens), subsubmap);
00333 
00334           ++numTokens;
00335         }
00336 
00337       submap->putIntParam("NUM_TOKENS", numTokens);
00338 
00339       pmap->putSubpmap(sformat("TOKEN_TYPE_%d", numTokenTypes), submap);
00340 
00341       ++numTokenTypes;
00342     }
00343   pmap->putIntParam("NUM_TOKEN_TYPES", numTokenTypes);
00344 
00345   pmap->format(fname);
00346 }
00347 
00348 // ######################################################################
00349 std::string NeoBrain::getToken(const std::string& token, int val) const
00350 {
00351   if (!itsSpeakSaliency.getVal())
00352     return std::string();
00353 
00354   std::string result;
00355 
00356   {
00357     GVX_MUTEX_LOCK(&itsSpeechTokenMapMutex);
00358 
00359     TokenMap::const_iterator itr = itsSpeechTokenMap.find(token);
00360     if (itr == itsSpeechTokenMap.end())
00361       LERROR("no such speech token: %s", token.c_str());
00362     else
00363       result = (*itr).second.getTextItemForVal(val);
00364   }
00365 
00366   return result;
00367 }
00368 
00369 // ######################################################################
00370 bool NeoBrain::sayToken(const std::string& token,
00371                         int val, int priority) const
00372 {
00373   const std::string text = this->getToken(token, val);
00374 
00375   if (text.length() == 0)
00376     return false;
00377 
00378   return this->sayText(text, priority, false);
00379 }
00380 
00381 // ######################################################################
00382 bool NeoBrain::sayObjectLabel(const std::string& label,
00383                               int confidence, bool forceLabel)
00384 {
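        // Speak the label only if it names a real object (i.e. not "nomatch",
        // "none" or empty) and differs from the last label we spoke, unless
        // forceLabel overrides those checks.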
00385   if ( (label != "nomatch"
00386         && label != "none"
00387         && label != ""
00388         && label != itsLastSpokenLabel) ||
00389         forceLabel)
00390     {
00391 //       const std::string intro = this->getToken("object_intro", confidence);
00392 //       if (!intro.empty()
00393 //           &&
00394 //           this->sayText(sformat("%s %s", intro.c_str(), label.c_str()),
00395 //                         /* priority = */ 0,
00396 //                         /* block = */ false))
00397       if (itsSpeakObjects.getVal()
00398           &&
00399           itsSpeechSynth->playWavFile(label + ".wav",
00400                                       /* priority = */ -5,
00401                                       /* block = */ false))
00402         {
00403           setKeepTracking(false);
00404           itsLastSpokenLabel = label;
00405           return true;
00406         }
00407     }
00408 
00409   itsLastSpokenLabel = "";
00410 
00411   return false;
00412 }
00413 
00414 // ######################################################################
00415 void NeoBrain::setTarget(const Point2D<int> loc, const Image<byte>& grey,
00416                          const int saliencyval, bool changeState)
00417 {
00418   if (!itsAllowTracking.getVal())
00419     return;
00420 
00421 #ifdef HAVE_OPENCV
00422   count = MAX_COUNT;
00423 
00424   IplImage* tmp = img2ipl(grey);
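        // Seed the tracker: when more than one point is requested, pick up to
        // MAX_COUNT Shi-Tomasi corners with cvGoodFeaturesToTrack(); otherwise
        // track just the single salient location we were given.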
00425   if (count > 1)
00426   {
00427     IplImage* eig = cvCreateImage(cvGetSize(tmp), 32, 1);
00428     IplImage* temp = cvCreateImage(cvGetSize(tmp), 32, 1);
00429     double quality = 0.01;
00430     double min_distance = 5;
00431 
00432     cvGoodFeaturesToTrack(tmp, eig, temp, points[1], &count,
00433         quality, min_distance, 0, 3, 0, 0.04);
00434     cvReleaseImage(&eig);
00435     cvReleaseImage(&temp);
00436 
00437   } else {
00438     // only one point requested: seed it directly from the salient location
00439     points[1][0].x = loc.i;
00440     points[1][0].y = loc.j;
00441 
00442   }
00443   cvFindCornerSubPix(tmp, points[1], count,
00444       cvSize(win_size,win_size), cvSize(-1,-1),
00445       cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,
00446         20,0.03));
00447   cvReleaseImageHeader(&tmp);
00448 
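        // Swap the pyramid and point buffers so that the features we just
        // found become the "previous" set that the next trackObject() call
        // starts from.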
00449   IplImage *swap_temp;
00450   prev_grey = grey;
00451   CV_SWAP( prev_pyramid, pyramid, swap_temp );
00452   CV_SWAP( points[0], points[1], swap_points );
00453 
00454 
00455   //LINFO("Init %i point (%f,%f)\n", count, points[1][0].x, points[1][0].y);
00456 
00457   if (changeState)
00458     itsState = CHECK_TARGET;
00459   itsTracking = true;
00460 #endif
00461 
00462   if (saliencyval >= 0)
00463     this->saySaliencyVal(byte(saliencyval));
00464 }
00465 
00466 // ######################################################################
00467 Point2D<int> NeoBrain::trackObject(const Image<byte>& grey)
00468 {
00469   itsRefreshSpeechFile.setVal(false);
00470 
00471   Point2D<int> targetLoc(-1,-1);
00472 
00473 #ifdef HAVE_OPENCV
00474   if (itsAllowTracking.getVal() && itsTracking)
00475   {
00476     if (count > 0)
00477     {
00478       IplImage* tmp1 = img2ipl(prev_grey);
00479       IplImage* tmp2 = img2ipl(grey);
00480 
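            // Pyramidal Lucas-Kanade: propagate points[0] from prev_grey into
            // the current frame using 3 pyramid levels and a win_size x
            // win_size search window; per-point success comes back in status[].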
00481       cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
00482                              points[0], points[1], count,
00483                              cvSize(win_size,win_size), 3, status, 0,
00484                              cvTermCriteria(CV_TERMCRIT_ITER
00485                                             |CV_TERMCRIT_EPS,
00486                                             20,0.03), flags);
00487 
00488       cvReleaseImageHeader(&tmp1);
00489       cvReleaseImageHeader(&tmp2);
00490 
00491       flags |= CV_LKFLOW_PYR_A_READY;
00492 
00493       // keep only points that tracked successfully (status[i] != 0), compacting the array
00494       int k, i;
00495       for(i = k = 0; i<count; i++)
00496       {
00497         if (!status[i])
00498           continue;
00499         points[1][k++] = points[1][i];
00500 
00501         targetLoc.i = std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x));
00502         targetLoc.j = std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y));
00503         ASSERT(grey.coordsOk(targetLoc));
00504       }
00505       count = k;
00506 
00507     }
00508 
00509     IplImage *swap_temp;
00510     CV_SWAP( prev_pyramid, pyramid, swap_temp );
00511     CV_SWAP( points[0], points[1], swap_points );
00512 
00513     moveHeadToTarget();
00514   }
00515   prev_grey = grey;
00516 #endif
00517 
00518   return targetLoc;
00519 }
00520 
00521 // ######################################################################
00522 std::vector<Point2D<int> > NeoBrain::getTrackersLoc(const Image<byte>& grey)
00523 {
00524   std::vector<Point2D<int> > trackersLoc;
00525 
00526 #ifdef HAVE_OPENCV
00527   if (itsAllowTracking.getVal() && itsTracking)
00528   {
00529     if (count > 0)
00530     {
00531       IplImage* tmp1 = img2ipl(prev_grey);
00532       IplImage* tmp2 = img2ipl(grey);
00533 
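            // Same pyramidal LK step as in trackObject(), except that here we
            // return the clamped image coordinates of every surviving point
            // instead of steering the head.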
00534       cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
00535                              points[0], points[1], count,
00536                              cvSize(win_size,win_size), 3, status, 0,
00537                              cvTermCriteria(CV_TERMCRIT_ITER
00538                                             |CV_TERMCRIT_EPS,
00539                                             20,0.03), flags);
00540 
00541       cvReleaseImageHeader(&tmp1);
00542       cvReleaseImageHeader(&tmp2);
00543 
00544       flags |= CV_LKFLOW_PYR_A_READY;
00545 
00546       // keep only points that tracked successfully and record their clamped image coordinates
00547       int k, i;
00548       for(i = k = 0; i<count; i++)
00549       {
00550         if (!status[i])
00551           continue;
00552         points[1][k++] = points[1][i];
00553 
00554         Point2D<int> tracker(std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x)),
00555             std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y)));
00556         trackersLoc.push_back(tracker);
00557       }
00558       count = k;
00559 
00560     }
00561 
00562     IplImage *swap_temp;
00563     CV_SWAP( prev_pyramid, pyramid, swap_temp );
00564     CV_SWAP( points[0], points[1], swap_points );
00565   }
00566   prev_grey = grey;
00567 #endif
00568 
00569   return trackersLoc;
00570 }
00571 
00572 
00573 // ######################################################################
00574 void NeoBrain::moveHeadToTarget()
00575 {
00576 #ifdef HAVE_OPENCV
00577   if (count > 0)
00578     {
00579       float targetX = points[1][0].x/itsImageDims.w();
00580       float targetY = points[1][0].y/itsImageDims.h();
00581 
00582       itsStats.bigerrframes        = itsBigErrFrames;
00583       itsStats.bigerrframes_thresh = itsBigErrFramesThresh.getVal();
00584       itsStats.targetframes        = itsTargetFrames;
00585       itsStats.targetframes_thresh = itsTargetFramesThresh.getVal();
00586       itsStats.nomoveframes        = itsNoMoveFrames;
00587       itsStats.nomoveframes_thresh = itsNoMoveFramesThresh.getVal();
00588       itsStats.stopframes          = itsStopFrames;
00589       itsStats.stopframes_thresh   = itsStopTrackDelayFrames.getVal();
00590 
00591 
00592       itsStats.leftEyePanPos   =  itsBeoHead->getLeftEyePanPos();
00593       itsStats.leftEyeTiltPos  =  itsBeoHead->getLeftEyeTiltPos();
00594       itsStats.rightEyePanPos  =  itsBeoHead->getRightEyePanPos();
00595       itsStats.rightEyeTiltPos =  itsBeoHead->getRightEyeTiltPos();
00596       itsStats.headPanPos      =  itsBeoHead->getHeadPanPos();
00597       itsStats.headTiltPos     =  itsBeoHead->getHeadTiltPos();
00598       itsStats.headYawPos      =  itsBeoHead->getHeadYawPos();
00599 
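            // Two-state controller: CHECK_TARGET sanity-checks the target
            // position and waits itsTrackDelayFrames frames before engaging;
            // TRACK_TARGET servos the head toward the point and advances the
            // give-up counters (big error, tiresome, no-move, boring) that
            // eventually end the track.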
00600       switch(itsState)
00601         {
00602         case CHECK_TARGET:
00603           itsTargetFrames++;
00604           if (targetX > 1.0 || targetX < 0 ||
00605               targetY > 1.0 || targetY < 0)
00606             {
00607               enterCheckTargetState();
00608               itsTracking = false;
00609             }
00610           else if (itsTargetFrames > itsTrackDelayFrames.getVal())
00611             {
00612               itsPrevTargetX = targetX;
00613               itsPrevTargetY = targetY;
00614               itsState = TRACK_TARGET;
00615             }
00616           break;
00617 
00618         case TRACK_TARGET:
00619           itsTargetFrames++;
00620 
00621           if (!itsKeepTracking.getVal() &&
00622               itsStopFrames > 0)
00623             ++itsStopFrames;
00624 
00625           LDEBUG("itsBigErrFrames=%lu (thresh %lu), "
00626                  "itsTargetFrames=%lu (thresh %lu), "
00627                  "itsNoMoveFrames=%lu (thresh %lu), "
00628                  "itsStopFrames=%lu (thresh %i), "
00629                  "itsBoringness=%.2f (thresh %d)",
00630                  itsBigErrFrames, itsBigErrFramesThresh.getVal(),
00631                  itsTargetFrames, itsTargetFramesThresh.getVal(),
00632                  itsNoMoveFrames, itsNoMoveFramesThresh.getVal(),
00633                  itsStopFrames, itsStopTrackDelayFrames.getVal(),
00634                  itsBoringness, itsBoringnessThresh.getVal());
00635 
00636           if (itsStopTrackDelayFrames.getVal() != -1 &&
00637               (int)itsStopFrames >= itsStopTrackDelayFrames.getVal())
00638             {
00639               enterCheckTargetState();
00640               itsTracking = false;
00641             }
00642           else if (itsStopFrames == 0
00643               && itsBigErrFrames >= itsBigErrFramesThresh.getVal())
00644             {
00645               this->sayToken("notrack_target", 0, 10);
00646               ++itsStopFrames;
00647             }
00648           else if (!itsKeepTracking.getVal() &&
00649                    itsStopFrames == 0
00650                    && itsTargetFrames >= itsTargetFramesThresh.getVal())
00651             {
00652               this->sayToken("tiresome_target", 0, 10);
00653               ++itsStopFrames;
00654             }
00655           else if (!itsKeepTracking.getVal() &&
00656                    itsStopFrames == 0
00657                    && itsNoMoveFrames >= itsNoMoveFramesThresh.getVal())
00658             {
00659               this->sayToken("nomove_target", 0, 10);
00660               ++itsStopFrames;
00661             }
00662           else if (!itsKeepTracking.getVal() &&
00663                    itsStopFrames == 0
00664                    && itsBoringness >= itsBoringnessThresh.getVal())
00665             {
00666               this->sayToken("boring_target", 0, 10);
00667               ++itsStopFrames;
00668             }
00669           else
00670             {
00671               const float xerr =
00672                 itsUseHead.getVal()
00673                 ? fabs(0.5 - targetX)
00674                 : fabs(itsPrevTargetX - targetX);
00675 
00676               const float yerr =
00677                 itsUseHead.getVal()
00678                 ? fabs(0.5 - targetY)
00679                 : fabs(itsPrevTargetY - targetY);
00680 
00681               const float err =
00682                 itsUseHead.getVal()
00683                 ? itsBeoHead->trackTarget(0.5, 0.5, targetX, targetY)
00684                 : (xerr + yerr);
00685 
00686               const float errtol = 0.01f * itsErrTolerance.getVal();
00687               const float disttol = 0.01f * itsDistTolerance.getVal();
00688 
00689               if (err > errtol)
00690                 itsBigErrFrames++;
00691 
00692               if (err < errtol && xerr < disttol && yerr < disttol)
00693                 itsNoMoveFrames++;
00694               else
00695                 itsNoMoveFrames = 0;
00696 
00697               LDEBUG("usehead = %d, err = %f (%f+%f), target = %f,%f",
00698                      int(itsUseHead.getVal()),
00699                      err, xerr, yerr, targetX, targetY);
00700 
00701               itsStats.last_err = err;
00702               itsStats.last_xerr = xerr;
00703               itsStats.last_yerr = yerr;
00704               itsStats.err_tol = errtol;
00705               itsStats.dist_tol = disttol;
00706 
00707               itsPrevTargetX = targetX;
00708               itsPrevTargetY = targetY;
00709 
00710               itsState = TRACK_TARGET;
00711             }
00712 
00713             // check the current head pose; if it matches the predefined "head info" pose, utter a canned response
00714             if ( (itsBeoHead->getLeftEyeTiltPos() >= itsHeadInfoEyeTiltPos.getVal() - 0.01) &&
00715                  (itsBeoHead->getLeftEyeTiltPos() <= itsHeadInfoEyeTiltPos.getVal() + 0.01) &&
00716 
00717                  (itsBeoHead->getLeftEyePanPos() >= itsHeadInfoEyePanPos.getVal() - 0.1) &&
00718                  (itsBeoHead->getLeftEyePanPos() <= itsHeadInfoEyePanPos.getVal() + 0.1) &&
00719 
00720                  (itsBeoHead->getHeadPanPos() >= itsHeadInfoHeadPanPos.getVal() - 0.01) &&
00721                  (itsBeoHead->getHeadPanPos() <= itsHeadInfoHeadPanPos.getVal() + 0.01)  &&
00722                  itsHeadInfoFrames == 0)
00723             {
00724               if (itsSpeakSaliency.getVal())
00725                 itsSpeechSynth->sendCommand("(wave.play headinfo)", -10, false);
00726               itsHeadInfoFrames++;
00727             }
00728 
00729             if (itsHeadInfoFrames != 0) // only advance the cooldown counter once it has been started
00730             {
00731               if (itsHeadInfoFrames++ > 50)
00732                 itsHeadInfoFrames = 0;
00733             }
00734 
00735           break;
00736 
00737         default:
00738           break;
00739         }
00740 
00741     }
00742   else
00743     {
00744       enterCheckTargetState();
00745     }
00746 #endif
00747 }
00748 
00749 // ######################################################################
00750 void NeoBrain::saySaliencyVal(byte val)
00751 {
00752   sayToken("new_target", val, 2);
00753 }
00754 
00755 // ######################################################################
00756 void NeoBrain::updateBoringness(const Image<byte>& salmap, byte foaval)
00757 {
00758   Point2D<int> truemaxpos;
00759   byte truemaxval;
00760   findMax(salmap, truemaxpos, truemaxval);
00761 
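        // val = (saliency at the attended location) - (global max saliency),
        // so it is <= 0 whenever something else in the map is more salient.
        // itsBoringness is a leaky average of -val: it grows while the tracked
        // target keeps losing to the rest of the scene, and once it exceeds
        // NeobrainBoringnessThresh the current track is wound down.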
00762   const int val = int(foaval) - int(truemaxval);
00763   itsBoringness = (0.9f * itsBoringness) + (0.1f * -val);
00764 }
00765 
00766 // ######################################################################
00767 void NeoBrain::updateExcitement(double vcxflicker)
00768 {
00769 
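        // Excitement is a leaky integrator: keep 99% of the previous level and
        // add small contributions from how non-boring the current target is
        // and from the flicker (motion energy) signal.  itsSleep is a separate
        // reserve that slowly drains and that motion replenishes; when it
        // recovers past 200 while asleep, the wake-up branch below fires.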
00770   itsExcitementLevel =
00771     0.99f * itsExcitementLevel
00772     + 0.007f * (255.0f - itsBoringness)
00773     + 0.003f * (255.0f * vcxflicker);
00774 
00775   if (itsSleeping.getVal())
00776     itsSleep = 0.95f * itsSleep;
00777   else
00778     itsSleep = 0.995f * itsSleep; // while awake, decay more slowly so it takes longer to fall asleep
00779 
00780   if (itsSleep > 1000) itsSleep = 1000;
00781   if (itsSleep < 1) itsSleep = 1;
00782   if ( (255.0f * vcxflicker > 200
00783         && (itsState == CHECK_TARGET && !itsPrepSleep))
00784        || itsSleeping.getVal()) // replenish from flicker while the head is not moving, or while asleep
00785   {
00786     itsSleep += 0.2*(255.0f * vcxflicker);
00787   }
00788 
00789   // After going to sleep, wait a few frames before letting motion count again;
00790   // this avoids reacting to the motion generated by pausing the cameras.
00791   if (itsPrepSleep)
00792   {
00793     itsPrepSleep++;
00794     if (itsPrepSleep > 100)
00795     {
00796       itsPrepSleep = 0;
00797       itsSleep = 0;
00798     }
00799   }
00800 
00801 
00802   //go to sleep
00803   if (itsSleep <= 1 && !itsSleeping.getVal())
00804   {
00805       //itsPrepSleep = 1;
00806       //itsSleeping.setVal(true);
00807       //setUseHead(false);
00808       //sleep(2);
00809       //itsBeoHead->moveRestPos();
00810       //itsSleep = 0;
00811       //setRelaxNeck(true);
00812       //itsBeoHead->relaxHead();
00813       //this->sayText("Good night to all, and to all a good night.", 0, true);
00814       //itsSpeakSaliency.setVal(false);
00815       //sleep(2);
00816       //itsSleep = 0;
00817   }
00818 
00819   // wake up if we are sleeping and not preparing to sleep
00820   if (itsSleep > 200 && itsSleeping.getVal() && !itsPrepSleep)
00821   {
00822     itsPrepSleep = 0;
00823     itsSpeakSaliency.setVal(true);
00824     this->sayText("Good Morning to you.", 0, false);
00825     setRelaxNeck(false);
00826     sleep(2);
00827     setUseHead(true);
00828     itsSleep = 1000;
00829     itsSleeping.setVal(false);
00830 
00831   }
00832 
00833 
00834 
00835   if (itsExcitementLevel > itsExcitementThresh.getVal())
00836     {
00837       if (itsSpeakSaliency.getVal())
00838         itsSpeechSynth->sendCommand("(wave.play daisy)", -10, false);
00839       itsExcitementLevel = 0;
00840       itsAlmostSinging = false;
00841     }
00842   else if (itsExcitementLevel + 10.f > itsExcitementThresh.getVal())
00843     {
00844       if (!itsAlmostSinging)
00845         this->sayText("You have excited me. Any more excitement "
00846                       "and I will start to sing", 0, false);
00847 
00848       itsAlmostSinging = true;
00849     }
00850   else if (itsAlmostSinging)
00851     {
00852       this->sayText("Now you have stopped exciting me.", 0, false);
00853       itsAlmostSinging = false;
00854     }
00855 }
00856 
00857 void NeoBrain::gotoSleep()
00858 {
00859   itsSleeping.setVal(true);
00860   setUseHead(false);
00861   sleep(2);
00862   itsBeoHead->moveRestPos();
00863   setRelaxNeck(true);
00864   itsBeoHead->relaxHead();
00865   this->sayText("Good night to all, and to all a good night.", 0, true);
00866   itsSpeakSaliency.setVal(false);
00867   sleep(2);
00868   itsSleep = 0;
00869 }
00870 
00871 void NeoBrain::wakeUp()
00872 {
00873   itsSpeakSaliency.setVal(true);
00874   this->sayText("Good Morning to you.", 0, false);
00875   setRelaxNeck(false);
00876   sleep(2);
00877   setUseHead(true);
00878   itsSleeping.setVal(false);
00879 }
00880 
00881 
00882 // ######################################################################
00883 float NeoBrain::getBoringness() const
00884 {
00885   return itsBoringness;
00886 }
00887 
00888 // ######################################################################
00889 float NeoBrain::getExcitementLevel() const
00890 {
00891   return itsExcitementLevel;
00892 }
00893 float NeoBrain::getSleepLevel() const
00894 {
00895   return itsSleep;
00896 }
00897 
00898 // ######################################################################
00899 bool NeoBrain::sayText(const std::string& text, int priority,
00900                        bool block) const
00901 {
00902   if (itsSpeakSaliency.getVal())
00903     return itsSpeechSynth->sayText(text.c_str(), priority, block);
00904 
00905   // else...
00906   return false;
00907 }
00908 
00909 // ######################################################################
00910 void NeoBrain::paramChanged(ModelParamBase* const param,
00911                             const bool valueChanged,
00912                             ParamClient::ChangeStatus* status)
00913 {
00914   ModelComponent::paramChanged(param, valueChanged, status);
00915 
00916   if (param == &itsRelaxNeck)
00917     {
00918       if (itsRelaxNeck.getVal())
00919         itsBeoHead->relaxNeck();
00920       else
00921         itsBeoHead->moveRestPos();
00922     }
00923   else if (param == &itsRefreshSpeechFile
00924            && valueChanged == true
00925            && itsRefreshSpeechFile.getVal() == true)
00926     {
00927       try
00928         {
00929           TokenMap newmap;
00930           readSpeechFile(newmap, itsSpeechFile.getVal());
00931 
00932           {
00933             GVX_MUTEX_LOCK(&itsSpeechTokenMapMutex);
00934             itsSpeechTokenMap.swap(newmap);
00935           }
00936 
00937           LINFO("reloaded utterances from %s",
00938                 itsSpeechFile.getVal().c_str());
00939         }
00940       catch (...)
00941         {
00942           REPORT_CURRENT_EXCEPTION;
00943           *status = ParamClient::CHANGE_REJECTED;
00944         }
00945     }
00946 }
00947 
00948 // ######################################################################
00949 void NeoBrain::enterCheckTargetState()
00950 {
00951   itsStats.bigerrframes = 0;
00952   itsStats.targetframes = 0;
00953   itsStats.nomoveframes = 0;
00954   itsStats.stopframes = 0;
00955   itsStats.last_err = 0.0f;
00956   itsStats.last_xerr = 0.0f;
00957   itsStats.last_yerr = 0.0f;
00958 
00959   itsTargetFrames = 0;
00960   itsBigErrFrames = 0;
00961   itsNoMoveFrames = 0;
00962   itsStopFrames = 0;
00963   itsPrevTargetX = -1.0f;
00964   itsPrevTargetY = -1.0f;
00965   itsBoringness = 0.0f;
00966   itsLastSpokenLabel = "";
00967   itsState = CHECK_TARGET;
00968 }
00969 
00970 // ######################################################################
00971 /* So things look consistent in everyone's emacs... */
00972 /* Local Variables: */
00973 /* mode: c++ */
00974 /* indent-tabs-mode: nil */
00975 /* End: */
00976 
00977 #endif // NEURO_NEOBRAIN_C_DEFINED