LoStafford.H

/**
   \file Robots/LoBot/lgmd/rind/LoStafford.H

   \brief Stafford's LGMD model.

   This file defines a class that implements the LGMD model described in:

      Stafford, R., Santer, R. D., Rind, F. C.
      ``A Bio-inspired Visual Collision Detection Mechanism for Cars:
        Combining Insect Inspired Neurons to Create a Robust System.''
      BioSystems, 87, 162--169, 2007.
*/

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: mviswana usc edu
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/LoBot/lgmd/rind/LoStafford.H $
// $Id: LoStafford.H 12860 2010-02-18 15:55:20Z mviswana $
//

#ifndef LOBOT_STAFFORD_LGMD_MODEL_DOT_H
#define LOBOT_STAFFORD_LGMD_MODEL_DOT_H

//------------------------------ HEADERS --------------------------------

// lobot headers
#include "Robots/LoBot/lgmd/LocustModel.H"

#include "Robots/LoBot/misc/LoTypes.H"
#include "Robots/LoBot/misc/factory.hh"
#include "Robots/LoBot/misc/singleton.hh"

// INVT image support
#include "Image/Image.H"

//----------------------------- NAMESPACE -------------------------------

namespace lobot {

//------------------------- CLASS DEFINITION ----------------------------

/**
   \class lobot::StaffordModel
   \brief Implementation of Stafford's LGMD model.

   This class implements the LGMD model described in the following paper:

      Stafford, R., Santer, R. D., Rind, F. C.
      ``A Bio-inspired Visual Collision Detection Mechanism for Cars:
        Combining Insect Inspired Neurons to Create a Robust System.''
      BioSystems, 87, 162--169, 2007.

   The above paper computes LGMD spikes using a three-layer neural
   network. The final layer is a summing layer that outputs an LGMD
   membrane potential. Spikes are generated when this potential crosses a
   predetermined threshold.

   In this implementation, however, we use the raw membrane potential
   computed by the S-layer as the final LGMD "value."

   Furthermore, this implementation does not (yet) take care of the
   directionally sensitive motion detectors (DSMDs) described in the
   paper and, therefore, does not fully suppress lateral motion. This
   means that any motion detected by the model, whether on a collision
   course or not, will result in LGMD spikes, which, of course, is not
   faithful to how the LGMD in real locusts operates.

   This is acceptable in our case because a separate portion of the
   Lobot/Robolocust framework takes care of integrating multiple LGMD
   signals into a coherent steering angle. It is this integration
   algorithm that suppresses lateral motion and other ill effects that
   might otherwise drive our locust-controlled robot astray.
*/
class StaffordModel : public LocustModel {
   // Prevent copy and assignment
   StaffordModel(const StaffordModel&) ;
   StaffordModel& operator=(const StaffordModel&) ;

   // Handy type to have around
   typedef LocustModel base ;

   // Boilerplate code to make the factory work
   friend  class subfactory<StaffordModel, base, base::InitParams> ;
   typedef register_factory<StaffordModel, base, base::InitParams> my_factory ;
   static  my_factory register_me ;

   /**
      \class StaffordModel::layer

      The Stafford model uses a multi-layer neural network to compute
      LGMD spikes. Some of the connections between the layers are delayed
      by one time-step. This convenience structure holds the image for
      the current and previous time-steps as it propagates through the
      layers.
   */
   struct layer {
      GrayImage previous ;
      GrayImage current ;
   } ;

   /// As mentioned above, the Stafford model computes LGMD spikes using
   /// a multi-layer neural network. The L-layer is not actually part of
   /// the Stafford model. We use it for the sake of convenience. It
   /// caches the luminance values of the composited input image from the
   /// various video sources Lobot/Robolocust reads from.
   ///
   /// The P-layer corresponds to the photoreceptor layer described in
   /// the Stafford paper. This layer is responsible for computing the
   /// basic motion detection using a simple subtraction between the
   /// previous and current images in the L-layer.
   ///
   /// The I-layer is the inhibition layer. It convolves the results of
   /// the P-layer from the current and previous time-steps with a fixed
   /// 3x3 kernel.
   ///
   /// The S-layer is the summation layer that takes the results from the
   /// P and I layers and produces an LGMD membrane potential by a simple
   /// summation. However, prior to the summation, negative values in the
   /// S-layer are discarded.
   ///
   /// Multiple instances of the Stafford model will all share the same
   /// source of input images with the only real difference between the
   /// instances being the subportion of the input image each one reads.
   /// This is done so as to simulate multiple locusts looking in
   /// different directions with limited (but overlapping) fields of
   /// view. Nonetheless, since they all share the same source, it makes
   /// sense for all instances to also share the different layers.
   ///
   /// This allows us to perform the computations for each layer just
   /// once. Then, each instance can read its assigned subportion of the
   /// S-layer to compute its LGMD potential.
   //@{
   static layer l_layer ;
   static layer p_layer ;
   static layer i_layer ;
   static layer s_layer ;
   //@}
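
   /// The following is a rough, illustrative sketch of the layer
   /// computations described above. It is NOT the actual implementation
   /// (which operates on the toolkit's GrayImage type); it uses plain
   /// std::vector grids, shows only the delayed (previous time-step)
   /// pathway into the I-layer, and assumes the S-layer is formed by
   /// subtracting the weighted I-layer from the P-layer and clipping
   /// negative values to zero before the final summation. The 3x3 kernel
   /// and the inhibition weight used here are placeholders, not the
   /// model's actual constants.
   ///
   /// \code
   ///    #include <algorithm>
   ///    #include <vector>
   ///
   ///    typedef std::vector<float> Grid ; // one value per pixel, row-major
   ///
   ///    // P-layer: frame-to-frame difference of the luminance (L) layer
   ///    Grid photoreceptor(const Grid& L_cur, const Grid& L_prev) {
   ///       Grid P(L_cur.size()) ;
   ///       for (unsigned int i = 0; i < P.size(); ++i)
   ///          P[i] = L_cur[i] - L_prev[i] ;
   ///       return P ;
   ///    }
   ///
   ///    // I-layer: 3x3 convolution spreading inhibition from the
   ///    // previous time-step's P-layer to neighbouring cells
   ///    Grid inhibition(const Grid& P_prev, int w, int h) {
   ///       const float K[3][3] = {{.125f, .125f, .125f},
   ///                              {.125f, 0.0f,  .125f},
   ///                              {.125f, .125f, .125f}} ; // placeholder kernel
   ///       Grid I(P_prev.size(), 0.0f) ;
   ///       for (int y = 1; y < h - 1; ++y)
   ///          for (int x = 1; x < w - 1; ++x) {
   ///             float sum = 0 ;
   ///             for (int j = -1; j <= 1; ++j)
   ///                for (int i = -1; i <= 1; ++i)
   ///                   sum += K[j + 1][i + 1] * P_prev[(y + j)*w + (x + i)] ;
   ///             I[y*w + x] = sum ;
   ///          }
   ///       return I ;
   ///    }
   ///
   ///    // S-layer and membrane potential: excitation minus inhibition,
   ///    // negatives discarded, then summed over this locust's subwindow
   ///    float membrane_potential(const Grid& P, const Grid& I, float wi) {
   ///       float U = 0 ;
   ///       for (unsigned int i = 0; i < P.size(); ++i)
   ///          U += std::max(0.0f, P[i] - wi*I[i]) ;
   ///       return U ;
   ///    }
   /// \endcode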

   /// To save on the amount of number-crunching involved in computing
   /// LGMD spikes using a multi-layer neural net, we have all instances
   /// of the Stafford model share the different layers and perform the
   /// necessary subtractions, convolutions and summations just once.
   /// Then, each instance reads its assigned subportion of the final
   /// layer of the neural net to compute the LGMD membrane potential for
   /// itself.
   ///
   /// To get this setup to work, we need to keep track of the number of
   /// instances of this class and the number of instances that have been
   /// updated. Only the first instance to be updated in each round
   /// triggers the layer computations, and the last one resets the
   /// layers for the next round.
   //@{
   static int m_instances ;
   static int m_layers_computed ;
   //@}
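
   /// A minimal, hypothetical illustration of this bookkeeping (the real
   /// logic lives in StaffordModel::update(); the names and the use of a
   /// static local counter here are purely for exposition):
   ///
   /// \code
   ///    void update_one_locust(int num_instances)
   ///    {
   ///       static int updated = 0 ;          // instances updated this frame
   ///       if (updated == 0)                 // first locust this round:
   ///          /* compute the shared l/p/i/s layers just once */ ;
   ///       /* ... read this locust's patch of the S-layer ... */
   ///       if (++updated == num_instances) { // last locust this round:
   ///          /* reset the shared layers for the next input frame */ ;
   ///          updated = 0 ;
   ///       }
   ///    }
   /// \endcode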

   /// Private constructor because this model is instantiated using a
   /// factory and accessed solely through the interface provided by its
   /// abstract base class.
   StaffordModel(const base::InitParams&) ;

   /// These methods perform the LGMD computations.
   //@{
   void update() ;
   void compute_layers() ;
   void reset_layers() ;
   void prime_previous_layers() ;
   bool suppress_lgmd(const float spikes[], const float potentials[]) ;

   template<typename rect_dim, typename rect_comp, typename pot_comp>
   float compute_dsmd_potential(rect_dim, rect_comp, pot_comp) ;
   //@}

   /// Private destructor because this model is instantiated using a
   /// factory and accessed solely through the interface provided by its
   /// abstract base class.
   ~StaffordModel() ;

   // In addition to the LGMD, the Stafford model also implements four
   // directionally sensitive motion detectors (DSMD neurons). And
   // variations of it use a feed-forward inhibition neuron as well. It is
   // convenient to group the potentials, etc., for all these neurons into
   // a single array indexed by the following symbols.
   enum {
      LGMD,      FFI,
      DSMD_LEFT, DSMD_RIGHT,
      DSMD_UP,   DSMD_DOWN,
      NUM_NEURONS
   } ;

   /// This inner class encapsulates various parameters that can be used
   /// to tweak different aspects of the LGMD model implemented by the
   /// StaffordModel class.
   class Params : public singleton<Params> {
      // Initialization
      Params() ; // private because this is a singleton
      friend class singleton<Params> ;

      /// LGMD, FFI and DSMD spike counts are computed by scaling down
      /// their raw membrane potentials to a number in the range [.5,1]
      /// and then counting one spike when that number exceeds the
      /// corresponding threshold in the following array.
      float m_spike_thresholds[NUM_NEURONS] ;

      /// To decide whether the LGMD neural network should emit a spike
      /// in response to the stimuli detected by each of the component
      /// neurons (i.e., the LGMD, the FFI cell and the 4 DSMDs), we can
      /// combine their individual spikes using a weighted sum.
      ///
      /// NOTE: This weighted sum procedure is an invention of the author
      /// of this class and is not described in any of the LGMD related
      /// papers by Stafford, Blanchard, Yue, Rind and gang. It can be
      /// turned off by setting the weight for the LGMD to one and all
      /// the others to zero.
      float m_spike_weights[NUM_NEURONS] ;
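
      /// To illustrate the thresholding and weighted combination
      /// described above, here is a rough sketch. It assumes a spike is
      /// counted as 1 when a neuron's scaled potential exceeds its
      /// threshold and that the network emits a spike when the weighted
      /// sum of these 0/1 spikes exceeds one half; the actual decision
      /// rule is in the implementation and may differ.
      ///
      /// \code
      ///    bool network_spike(const float scaled[NUM_NEURONS],
      ///                       const float thresh[NUM_NEURONS],
      ///                       const float weight[NUM_NEURONS])
      ///    {
      ///       float sum = 0 ;
      ///       for (int n = 0; n < NUM_NEURONS; ++n)
      ///          sum += weight[n] * ((scaled[n] > thresh[n]) ? 1.0f : 0.0f) ;
      ///       return sum > 0.5f ; // with weights {1,0,0,0,0,0} this reduces
      ///                           // to the plain LGMD threshold test
      ///    }
      /// \endcode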

      /// The formula used to scale the raw membrane potentials down to
      /// the [.5,1] range is the following sigmoid function:
      ///
      ///                                       1
      ///                           u = -----------------
      ///                               1 + exp(-U/(n*N))
      ///
      /// where U is the raw membrane potential obtained by summing the
      /// relevant portion(s) of the S-layer; N is the total number of
      /// pixels in the locust's FOV; and n is a fudge factor defined
      /// below.
      ///
      /// This fudge factor acts as a scaling factor that magnifies the
      /// total visual area of our virtual locust. It doesn't necessarily
      /// make any real sense; it's really just a knob that seems to get
      /// things working a wee bit smoother when twiddled just right.
      ///
      /// Since the DSMD membrane potentials are computed using a
      /// multiplication and addition, they tend to be quite large
      /// numbers (usually on the order of millions). This makes it
      /// necessary to quell them quite a bit compared to the LGMD
      /// membrane potential. Thus, we use an array of area magnification
      /// factors instead of just one for all the neurons.
      float m_area_magnifiers[NUM_NEURONS] ;
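
      /// For reference, the scaling formula above in code form. This is
      /// a direct transcription of the sigmoid, not the implementation;
      /// note that U is non-negative here (negative S-layer values are
      /// discarded), which is what keeps the result in [.5,1).
      ///
      /// \code
      ///    #include <cmath>
      ///
      ///    // U = raw membrane potential, N = number of pixels in the
      ///    // locust's FOV, n = this neuron's area magnification factor
      ///    inline float scale_potential(float U, float N, float n)
      ///    {
      ///       return 1.0f/(1.0f + std::exp(-U/(n*N))) ;
      ///    }
      /// \endcode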

      /// The LGMD spike rate is computed as a running average of the
      /// spike count using the usual formula:
      ///       running average = w*current + (1-w)*old
      ///
      /// The following parameter specifies the value of w in the above
      /// formula. It should be a number between 0 and 1. Higher values
      /// indicate greater confidence in the current readings while lower
      /// ones give greater weight to older readings.
      ///
      /// For this model, the raw membrane potentials tend to be quite
      /// jumpy. So lower running average weights tend to do a better job
      /// of smoothing out the overall spike rate.
      float m_running_average_weight ;
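
      /// In code, the update amounts to a single line (sketch only;
      /// spike_rate and current_count are hypothetical names):
      ///
      /// \code
      ///    spike_rate = w*current_count + (1 - w)*spike_rate ;
      /// \endcode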

      /// Since the FFI and DSMDs don't seem to work too well, they can
      /// be turned off.
      bool m_ffi_on ;
      bool m_dsmd_on ;

      /// If the DSMDs are on, the following thresholds will be applied
      /// to check for horizontal and vertical lateral motions. Here's
      /// how these thresholds work: let us say that we want to check for
      /// horizontal lateral motion and its threshold is 10. Then, we
      /// will measure the membrane potentials of the left and right
      /// DSMDs and if one is greater than the other by a factor of 10,
      /// we conclude that there is lateral motion in the horizontal
      /// direction. Ditto for the vertical case.
      float m_horizontal_motion_threshold ;
      float m_vertical_motion_threshold ;
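
      /// A rough sketch of this ratio test, assuming "greater by a
      /// factor of T" means the larger potential exceeds T times the
      /// smaller one; the actual comparison in the implementation may
      /// handle zero or near-zero potentials differently.
      ///
      /// \code
      ///    #include <algorithm>
      ///
      ///    // true if the two opposing DSMD potentials differ by more
      ///    // than the given threshold factor
      ///    inline bool lateral_motion(float a, float b, float threshold)
      ///    {
      ///       float lo = std::min(a, b), hi = std::max(a, b) ;
      ///       return lo > 0 && hi > threshold*lo ;
      ///    }
      /// \endcode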

      /// The DSMD potential is computed by using blocks of pixels in the
      /// S-layer that run across the center of the entire image along
      /// both axes. In their paper, Stafford et al. use 10x10 blocks. In
      /// our case, we try to do that too. However, if this size doesn't
      /// quite work out, we fall back to an alternative (smaller) size
      /// to be able to cover the entire image width and height while
      /// still getting at least 2-3 EMDs.
      int m_ideal_dsmd_block_size ;
      int m_alt_dsmd_block_size ;
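
      /// A hedged sketch of how this fallback might look, assuming
      /// "doesn't work out" means the ideal size does not yield enough
      /// whole blocks across the relevant image dimension; the actual
      /// criterion is in the implementation.
      ///
      /// \code
      ///    // dim = image width or height; min_blocks = the 2-3 EMDs'
      ///    // worth of blocks we want at a minimum
      ///    inline int dsmd_block_size(int dim, int ideal, int alt, int min_blocks)
      ///    {
      ///       return (dim/ideal >= min_blocks) ? ideal : alt ;
      ///    }
      /// \endcode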

   public:
      // Accessing the various parameters
      static const float* spike_thresholds() ;
      static const float* spike_weights() ;
      static const float* area_magnifiers() ;
      static float running_average_weight() ;
      static bool ffi_on() ;
      static bool dsmd_on() ;
      static float horizontal_motion_threshold() ;
      static float vertical_motion_threshold() ;
      static int ideal_dsmd_block_size() ;
      static int alt_dsmd_block_size() ;

      // Clean-up
      ~Params() ;
   } ;
} ;

//-----------------------------------------------------------------------

} // end of namespace encapsulating this file's definitions

#endif

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */