LoLGMDExtricateTTI.H

/**
   \file  Robots/LoBot/control/LoLGMDExtricateTTI.H
   \brief A behaviour for getting the robot unstuck using LGMD spikes and
   a virtual force field based on a Bayesian time-to-impact state
   estimation loop.

   This file defines a class that implements the virtual force field
   concept for moving lobot away from obstacles using time-to-impact
   estimates derived from LGMD spikes.

   As per the research by Gabbiani et al., we know that LGMD spikes are
   related to the time-to-impact of approaching objects. Unfortunately,
   given a spike rate, we cannot easily determine the corresponding
   time-to-impact (TTI) because the spike rate function is
   non-invertible.

   Thus, for each (actual or virtual) locust connected to the robot, this
   behaviour performs Bayesian state estimation in an attempt to
   determine the corresponding TTI given the spike rate. Once we have
   TTI, we can use the velocity information returned by the robot's
   motor system to calculate a distance estimate for each locust. This
   would, in effect, allow us to use the locust array as a kind of range
   sensor.
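
   To make this concrete, here is a minimal sketch of one way such a TTI
   estimator could be implemented. It is purely illustrative and is not
   the interface of lobot::TTIEstimator; the bin layout, the
   spike_likelihood() sensor model and the function names are all
   assumptions made for this example.

   \code
   #include <vector>

   // One step of a discrete Bayes filter over candidate TTI values.
   // spike_likelihood(rate, tti) stands in for a sensor model derived
   // from the LGMD spike rate function and returns P(rate | tti).
   float update_tti_belief(std::vector<float>& belief,         // P(tti) per bin
                           const std::vector<float>& tti_bins, // candidate TTI values
                           float spike_rate,                   // current LGMD firing rate
                           float (*spike_likelihood)(float, float))
   {
      float total = 0 ;
      for (unsigned int i = 0; i < belief.size(); ++i) {
         belief[i] *= spike_likelihood(spike_rate, tti_bins[i]) ; // Bayes update
         total     += belief[i] ;
      }
      for (unsigned int i = 0; i < belief.size(); ++i) // renormalize the posterior
         belief[i] /= total ;

      unsigned int best = 0 ; // point estimate: bin with maximum posterior
      for (unsigned int i = 1; i < belief.size(); ++i)
         if (belief[i] > belief[best])
            best = i ;
      return tti_bins[best] ;
   }

   // Given a TTI estimate and the robot's current speed as reported by
   // the motor system, the distance to whatever that locust is looking
   // at is simply: distance = speed * tti
   \endcode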

   These distance "readings" can then be used to construct a virtual
   force field comprised of repulsive and attractive forces just as we
   would do for a real range sensor.
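
   For instance, a locust looking along direction theta (relative to the
   robot's heading) whose distance reading falls below the force field's
   distance threshold (described in more detail in the class
   documentation below) would contribute a force pushing the robot away
   from that direction, while a reading above the threshold would pull
   the robot toward it. The helper below is illustrative only and is not
   part of this behaviour's interface:

   \code
   #include <cmath>

   // Convert one locust's distance "reading" into the x and y components
   // of its contribution to the force field. theta is the direction in
   // which the locust is looking w.r.t. the robot's heading and T is the
   // distance threshold.
   void locust_force(float distance, float T, float theta,
                     float& fx, float& fy)
   {
      float magnitude = T - distance ;    // +ve below threshold, -ve above
      fx = -magnitude * std::cos(theta) ; // repulsive forces point away from
      fy = -magnitude * std::sin(theta) ; // the obstacle, attractive ones toward it
   }
   \endcode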
*/

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: mviswana usc edu
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/LoBot/control/LoLGMDExtricateTTI.H $
// $Id: LoLGMDExtricateTTI.H 13879 2010-09-03 19:52:57Z mviswana $
//

#ifndef LOBOT_LGMD_EXTRICATE_TTI_DOT_H
#define LOBOT_LGMD_EXTRICATE_TTI_DOT_H

//------------------------------ HEADERS --------------------------------

// lobot headers
#include "Robots/LoBot/control/LoBehavior.H"

#include "Robots/LoBot/tti/LoTTIEstimator.H"
#include "Robots/LoBot/lgmd/LocustModel.H"

#include "Robots/LoBot/misc/LoVector.H"
#include "Robots/LoBot/misc/factory.hh"

// Standard C++ headers
#include <string>
#include <vector>

//----------------------------- NAMESPACE -------------------------------

namespace lobot {

//------------------------- CLASS DEFINITION ----------------------------

/**
   \class lobot::LGMDExtricateTTI

   \brief A behaviour for moving the robot away from obstacles by
   applying a virtual force field comprised of repulsive and attractive
   forces based on time-to-impact estimates derived from LGMD inputs.

   This class implements a behaviour designed to move lobot away from
   obstacles obstructing its path by applying the virtual force field
   concept on distance "readings" derived from time-to-impact (TTI)
   estimates for each (real or virtual) locust connected to the robot.

   Since LGMD spikes are related to the TTI of approaching objects, we
   can use Bayesian state estimation to determine the TTI given the
   current spike rate. Combining this TTI with the motor system's
   velocity info allows us to compute the distance to obstacles for each
   locust direction, in effect, converting our array of visual collision
   detectors into a range sensor.

   Once we have distance estimates, we can build a virtual force field
   wherein distances above a threshold represent attractive forces and
   distances below the same threshold exert repulsive forces. The
   attractive and repulsive forces are then combined to produce a vector
   that is used to decide on an appropriate steering command.
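
   The fragment below sketches that decision step. It is not the actual
   implementation of this class's action() method; the Reading structure
   and every name in it are invented for illustration, and the real
   behaviour obtains its data from the TTI estimators and the motor
   system.

   \code
   #include <cmath>
   #include <vector>

   struct Reading { float distance, direction ; } ; // direction in radians

   void force_field(const std::vector<Reading>& readings, float threshold,
                    float& turn_angle)
   {
      float ax = 0, ay = 0 ; // attractive force accumulator
      float rx = 0, ry = 0 ; // repulsive  force accumulator
      for (unsigned int i = 0; i < readings.size(); ++i)
      {
         float m = threshold - readings[i].distance ;
         float x = -m * std::cos(readings[i].direction) ;
         float y = -m * std::sin(readings[i].direction) ;
         if (m > 0) { rx += x ; ry += y ; } // below threshold ==> repulsive
         else       { ax += x ; ay += y ; } // above threshold ==> attractive
      }
      float tx = ax + rx, ty = ay + ry ;    // total force

      // Steer in the direction of the total force vector
      turn_angle = std::atan2(ty, tx) ;
   }
   \endcode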
*/
class LGMDExtricateTTI : public Behavior {
   // Prevent copy and assignment
   LGMDExtricateTTI(const LGMDExtricateTTI&) ;
   LGMDExtricateTTI& operator=(const LGMDExtricateTTI&) ;

   // Handy type to have around in a derived class
   typedef Behavior base ;

   // Boilerplate code to make the generic factory design pattern work
   friend  class subfactory<LGMDExtricateTTI, base> ;
   typedef register_factory<LGMDExtricateTTI, base> my_factory ;
   static  my_factory register_me ;

   /// In effect, this behaviour attempts to convert the array of locusts
   /// into a range sensor by estimating the time-to-impact from each
   /// locust's LGMD spike rate. This data structure holds the TTI
   /// estimators for all the locusts.
   std::vector<TTIEstimator*> m_tti ;

   /// Once we have TTI estimates for each locust, we compute distances
   /// to obstacles by using the velocity information returned by the
   /// motor system. Then, we construct a virtual force field based on
   /// these distance "readings." Each distance estimate below a certain
   /// threshold exerts a repulsive force on the robot; a distance
   /// estimate above the same threshold results in an attractive force.
   /// The resulting total force is used to drive and steer the robot
   /// away from obstacles.
   ///
   /// These data members are used to keep track of the attractive,
   /// repulsive and total force vectors.
   ///
   /// DEVNOTE: Actually, we only need to remember these forces for
   /// visualization purposes.
   Vector m_attractive, m_repulsive, m_total_force ;

   /// In each iteration, this behaviour issues both a drive and a turn
   /// command. This structure is a convenient way to hold both these
   /// commands together in one place.
   struct Command {
      int drive ;
      int turn  ;

      Command() ;
   } ;

   /// This data member holds the most recent commands issued by this
   /// behaviour. Useful for visualization.
   Command m_cmd ;

   /// A private constructor because behaviours are instantiated with an
   /// object factory and not directly by clients.
   LGMDExtricateTTI() ;

   /// This method creates the TTI estimators before the behaviour's
   /// regular action processing loop begins.
   void pre_run() ;

   /// This method implements the behaviour's extrication strategy. As
   /// mentioned earlier, it works by determining the sum of attractive
   /// and repulsive forces that are computed using distance
   /// "measurements" derived from TTI estimates based on LGMD spike
   /// rates.
   void action() ;

   /// Helper method to record visualization data.
   void record_viz(const Vector& att = Vector(),
                   const Vector& rep = Vector(),
                   const Vector& tot = Vector(),
                   const Command& = Command()) ;

   /// Visualization routines to aid with development and debugging.
   void render_me() ;

   /// Clean-up.
   ~LGMDExtricateTTI() ;
} ;

//-----------------------------------------------------------------------

} // end of namespace encapsulating this file's definitions

#endif

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */