LoTTIEstimator.H

/**
   \file  Robots/LoBot/tti/LoTTIEstimator.H
   \brief A Bayesian time-to-impact state estimator.

   This file defines a class that estimates time-to-impact from LGMD
   spikes.

   As per the research by Gabbiani, et al., we know that LGMD spikes are
   related to the time-to-impact of approaching objects. Unfortunately,
   given a spike rate, we cannot easily determine the corresponding
   time-to-impact (TTI) because the spike rate function is
   non-invertible.

   Thus, for each (actual or virtual) locust connected to the robot, we
   will have a corresponding TTI estimator that will perform Bayesian
   state estimation in an attempt to determine the corresponding TTI
   given spike rate. Once we have the TTI for a locust, we can use the
   velocity information returned by the robot's motor system to calculate
   a distance estimate for that locust. This would, in effect, allow us
   to use the locust array as a kind of range sensor.
*/

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: mviswana usc edu
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/LoBot/tti/LoTTIEstimator.H $
// $Id: LoTTIEstimator.H 14018 2010-09-23 07:10:34Z mviswana $
//

#ifndef LOBOT_TTI_ESTIMATOR_DOT_H
#define LOBOT_TTI_ESTIMATOR_DOT_H

//------------------------------ HEADERS --------------------------------

// lobot headers
#include "Robots/LoBot/ui/LoDrawable.H"
#include "Robots/LoBot/tti/LoSensorModel.H"
#include "Robots/LoBot/lgmd/LocustModel.H"

#include "Robots/LoBot/misc/LoVector.H"
#include "Robots/LoBot/misc/wma.hh"
#include "Robots/LoBot/util/range.hh"

// Standard C++ headers
#include <deque>
#include <vector>

//----------------------------- NAMESPACE -------------------------------

namespace lobot {

//------------------------- CLASS DEFINITION ----------------------------

/**
   \class lobot::TTIEstimator

   \brief Encapsulation of Bayesian time-to-impact estimation using LGMD
   spike rates.

   This class implements a time-to-impact (TTI) estimator for each (real
   or virtual) locust connected to the robot.

   Since LGMD spikes are related to the TTI of approaching objects, we
   can use Bayesian state estimation to determine the TTI given the
   current spike rate. Combining this TTI with the motor system's
   velocity info allows us to compute the distance to obstacles for each
   locust direction, in effect, converting our array of visual collision
   detectors into a range sensor.
*/
class TTIEstimator : public Drawable {
   // Prevent copy and assignment
   TTIEstimator(const TTIEstimator&) ;
   TTIEstimator& operator=(const TTIEstimator&) ;

   /// Each instance of this class must be "linked" to a corresponding
   /// locust.
   const LocustModel* m_locust ;

   /// The LGMD spike rate is a function of an approaching object's
   /// time-to-impact. Unfortunately, that function is not invertible,
   /// meaning that we cannot simply apply an inverse function to go
   /// from spike rate to TTI. Therefore, we use Bayesian state
   /// estimation to determine an approximate TTI given a locust's
   /// current LGMD spike rate.
   ///
   /// The state estimation works by discretizing the possible TTI
   /// values and continually updating a posterior probability
   /// distribution (aka belief) every time we get new sensor (i.e.,
   /// LGMD) readings. The discretization is specified in the config
   /// file via a range of TTI values and a step size.
   ///
   /// This data structure holds the current belief state.
   //@{
   typedef std::vector<float> Belief ;
   Belief m_belief ;
   //@}
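
   /* For illustration only: a minimal sketch of how such a
      discretization could be set up, assuming a hypothetical TTI range
      of [0, 10] seconds and a step size of 0.25 seconds (the actual
      values come from the config file):

         const float tti_min = 0, tti_max = 10, tti_step = 0.25f ;
         const int   n = int((tti_max - tti_min)/tti_step) ; // 40 TTI bins
         Belief belief(n, 1.0f/n) ; // start from a uniform prior
   */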

   /// When we update the belief, we also update the current estimate of
   /// the time-to-impact by using the belief state with the maximum
   /// likelihood value and store that estimate in this variable.
   float m_tti ;

   /// This variable keeps track of the likelihood value associated with
   /// the belief state that currently has the maximum likelihood.
   float m_confidence ;
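
   /* Illustrative sketch (not the actual implementation): given the
      hypothetical discretization above, the maximum likelihood estimate
      and its confidence could be read off the belief like so (requires
      <algorithm>):

         const int k = std::max_element(m_belief.begin(), m_belief.end())
                          - m_belief.begin() ;
         m_tti        = tti_min + k * tti_step ; // most probable TTI bin
         m_confidence = m_belief[k] ;            // probability of that bin
   */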

   /// These two data structures are used to maintain a history of recent
   /// actual and predicted TTI or distance values for visualization
   /// purposes.
   std::deque<float> m_actual, m_predicted ;

   /// Each locust is set up to look in a particular direction.
   /// lobot::LocustModel specifies that direction in polar form. For
   /// this class, we prefer a Cartesian vector representation, which
   /// gets used in the distance computation based on the TTI
   /// estimate.
   ///
   /// DEVNOTE: This data member is used simply to avoid having to
   /// keep computing the same vector over and over. Each locust is
   /// initialized to look in some direction. That direction doesn't
   /// change. When we want to compute a distance reading from the TTI
   /// estimate, we will have to project the robot's velocity vector
   /// onto the locust's direction vector. Since the locust's
   /// direction vector will always be the same, creating a vector by
   /// computing the sine and cosine of the direction stored in
   /// m_locust is somewhat wasteful. So we compute it once when the
   /// estimator is created.
   const Vector m_direction ;
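
   /* Sketch of the idea, assuming lobot::Vector can be constructed from
      (x, y) components and that the locust's direction is an angle in
      degrees relative to the robot's heading (both assumptions, not
      verified against LoVector.H):

         const float phi = m_locust->direction() * M_PI/180 ; // polar angle
         Vector direction(cos(phi), sin(phi)) ;               // computed once, then cached
   */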

   /// Although the TTI estimator can obtain the LGMD spike rate for
   /// its locust directly using the above data member, we choose to
   /// instead copy that value before performing the Bayesian state
   /// estimation so as to minimize the amount of time spent holding
   /// on to the Robolocust update lock.
   ///
   /// DEVNOTE: If we don't copy the individual spike rates, client
   /// behaviours will have to hold the update lock for quite a while as
   /// the estimators compute the TTI for each locust using the recursive
   /// Bayesian update equations. This can hold up the main thread's
   /// sensorimotor updates. Bad Idea.
   ///
   /// DEVNOTE 2: The LGMD-vs-TTI curve can be partitioned into two
   /// phases, viz., LOOMING and BLANKING. To detect the signal peak that
   /// divides these two phases, we need to keep track of the second
   /// derivative of the LGMD signal. Therefore, we need the two most
   /// recent LGMD values and also two values for the first derivative.
   /// The second derivative itself is low-pass filtered to avoid sudden
   /// transitions from one state to the other.
   //@{
   float m_lgmd[2] ;   // input signal
   float m_fder[2] ;   // first derivative
   wma<float> m_sder ; // low-pass filtered second derivative
   //@}
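
   /* Illustrative sketch of the peak detection, assuming the LGMD signal
      is sampled at a fixed interval dt and that wma<> accepts new samples
      via an add()-style call (hypothetical API, not verified against
      wma.hh):

         m_fder[1] = m_fder[0] ;
         m_fder[0] = (m_lgmd[0] - m_lgmd[1])/dt ;   // first derivative
         m_sder.add((m_fder[0] - m_fder[1])/dt) ;   // filtered second derivative

      A sign change in the first derivative combined with a negative
      filtered second derivative marks the peak that separates the
      LOOMING and BLANKING phases.
   */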

   /// After the TTI estimate has been made, client behaviours can gauge
   /// the distance to obstacles in each locust direction by projecting
   /// the robot's current velocity in that direction and getting the
   /// estimator to compute an estimate of the distance based on its TTI
   /// belief. This data member is used to hold the current distance
   /// estimate for the locust to which this TTI estimator is linked.
   float m_distance ;

   /// In order for the whole Bayesian state estimation to work, we need
   /// a sensor model that supplies probability values for LGMD spike
   /// rates given times-to-impact, i.e., a table of P(lgmd|tti), the
   /// so-called causal data.
   ///
   /// The LGMD spike rate is a function of an approaching object's
   /// time-to-impact. When the object is far away, the LGMD's spike rate
   /// will be fairly low. As it approaches, the LGMD starts firing very
   /// rapidly. Shortly before impact, the LGMD firing rate reaches a
   /// peak and then drops off sharply until impact.
   ///
   /// The peak described above "partitions" the LGMD firing rate vs.
   /// time-to-impact curve into two distinct "phases." We refer to the
   /// first phase, wherein the curve rises to its peak, as LOOMING
   /// because the object is looming large in the LGMD's field of view.
   /// The second phase we call BLANKING because feedforward inhibition
   /// kicks in after the peak to shut down the LGMD right before impact.
   ///
   /// To get the Bayesian time-to-impact estimation to work well, it
   /// would be best to use different causal likelihood profiles
   /// corresponding to each of these two "phases" described above. These
   /// two functions provide sensor models for the two different
   /// likelihood profiles.
   //@{
public:
   static SensorModel&  looming_sensor_model() ;
   static SensorModel& blanking_sensor_model() ;
private:
   //@}

   /// This member variable points to the correct sensor model depending
   /// on the LGMD input signal's phase as described above. Thus, in the
   /// LOOMING phase, it points to the sensor model returned by
   /// looming_sensor_model(); in the BLANKING phase, it points to the
   /// one returned by blanking_sensor_model().
   const SensorModel* m_sensor_model ;

   /// This enumeration specifies symbols for the different LGMD phases
   /// recognized by the TTI estimator.
   enum LGMDPhase {
      LOOMING,
      BLANKING,
   } ;

   /// This method returns the current phase of the input LGMD signal by
   /// checking what the current sensor model is pointing to.
   LGMDPhase lgmd_phase() const ;

   /// This method switches the sensor model to the specified likelihood
   /// profile, taking care to perform any necessary mutex locking (e.g.,
   /// to avoid clashes with the visualization thread).
   void sensor_model(const SensorModel*) ;

public:
   /// When instantiating a TTI estimator, client behaviours must specify
   /// the corresponding locust for which the estimator is to determine
   /// time-to-impact given LGMD spike rate.
   TTIEstimator(const LocustModel*) ;

   /// Before performing the TTI estimation, client behaviours must first
   /// copy the LGMD spike rates from the locusts into their
   /// corresponding estimators. This two-step approach is necessary to
   /// minimize the duration that client behaviours will hold on to the
   /// Robolocust update locks.
   void copy_lgmd() ;

   /// This method implements the recursive Bayesian state estimation
   /// update equations in order to determine the time-to-impact given
   /// an LGMD spike rate.
   void update() ;
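
   /* The core of the recursive Bayesian update, sketched under the
      assumption that the sensor model can return the likelihood column
      P(lgmd|tti_i) for the current spike rate (the column() accessor
      shown here is hypothetical):

         std::vector<float> lik = m_sensor_model->column(m_lgmd[0]) ;
         float sum = 0 ;
         for (unsigned int i = 0; i < m_belief.size(); ++i) {
            m_belief[i] *= lik[i] ; // posterior ~ likelihood * prior
            sum += m_belief[i] ;
         }
         for (unsigned int i = 0; i < m_belief.size(); ++i)
            m_belief[i] /= sum ;    // normalize so the belief sums to one
   */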

   /// This method projects the supplied velocity vector (assumed to
   /// be the robot's current velocity) onto this TTI estimator's
   /// locust's direction vector and uses the latest estimate of the
   /// time-to-impact to compute a corresponding obstacle distance in
   /// that locust's direction.
   void compute_distance(const Vector& velocity) ;
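
   /* Sketch of the distance computation, assuming a dot_product() helper
      for lobot::Vector (hypothetical name): the robot's velocity is
      projected onto the locust's viewing direction and scaled by the
      estimated time-to-impact:

         float speed = dot_product(velocity, m_direction) ; // speed toward obstacle
         m_distance  = m_tti * speed ;                      // distance = speed * time
   */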

   /// Accessors.
   //@{
   float  lgmd()               const {return m_lgmd[0]       ;}
   float  actual_tti()         const {return m_locust->tti() ;}
   float  predicted_tti()      const {return m_tti           ;}
   float  actual_distance()    const {return m_locust->distance() ;}
   float  predicted_distance() const {return m_distance           ;}
   float  distance()           const {return predicted_distance() ;}
   float  confidence()         const {return m_confidence         ;}
   float  locust_direction()   const {return m_locust->direction();}
   Vector direction()          const {return m_direction          ;}
   range<int> lrf_range()      const {return m_locust->get_lrf_range() ;}
   //@}

private:
   /// Visualization
   //@{
   static void render_hook(unsigned long) ;
   void render_belief() ;
   void render_tti() ;
   void render_distance() ;
   //@}

public:
   /// Clean-up.
   ~TTIEstimator() ;
} ;
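
/* A rough usage sketch from a client behaviour's point of view (the
   locust pointer, velocity source and any locking around copy_lgmd()
   are application-specific and shown here only for illustration):

      TTIEstimator estimator(locust) ;  // one estimator per locust

      // inside the behaviour's action loop:
      estimator.copy_lgmd() ;                      // grab latest LGMD spike rate
      estimator.update() ;                         // Bayesian TTI update
      estimator.compute_distance(robot_velocity) ; // convert TTI to a range reading
      float d = estimator.distance() ;             // estimated obstacle distance
*/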

//-----------------------------------------------------------------------

} // end of namespace encapsulating this file's definitions

#endif

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */