LoCalibrateLET.C

/**
   \file  Robots/LoBot/control/LoCalibrateLET.C
   \brief This file defines the non-inline member functions of the
   lobot::CalibrateLET class.
*/

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: mviswana usc edu
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/LoBot/control/LoCalibrateLET.C $
// $Id: LoCalibrateLET.C 13732 2010-07-29 14:16:35Z mviswana $
//

//------------------------------ HEADERS --------------------------------

// lobot headers
#include "Robots/LoBot/control/LoCalibrateLET.H"

#include "Robots/LoBot/tti/LoTTIEstimator.H"
#include "Robots/LoBot/tti/LoSensorModel.H"
#include "Robots/LoBot/lgmd/gabbiani/LoGabbiani.H"

#include "Robots/LoBot/config/LoConfigHelpers.H"
#include "Robots/LoBot/misc/LoRegistry.H"

#include "Robots/LoBot/util/LoGL.H"
#include "Robots/LoBot/util/LoMath.H"
#include "Robots/LoBot/util/range.hh"
#include "Robots/LoBot/util/triple.hh"

// OpenGL headers
#ifdef INVT_HAVE_LIBGLU
#include <GL/glu.h>
#endif

#ifdef INVT_HAVE_LIBGL
#include <GL/gl.h>
#endif

// Standard C++ headers
#include <sstream>
#include <algorithm>
#include <vector>

//----------------------------- NAMESPACE -------------------------------

namespace lobot {

//------------------------------ GLOBALS --------------------------------

// Robolocust uses two sensor models: one for the LOOMING phase of the
// LGMD signal and the other for the BLANKING phase. This variable is
// used to point at one of the sensor models. The one it points to will
// be the one with the "input focus," i.e., '+' and '-' keypresses will
// result in incrementing/decrementing the sigma associated with that
// particular sensor model. The TAB key is used to switch input focus to
// the other sensor model.
SensorModel* g_sensor_model ;

// Every time the user presses '+' or '-' to increment/decrement the
// sigma associated with the "active" sensor model, we will have to
// update the GL texture object used to visualize that sensor model. This
// flag is used to let the rendering function know that it needs to
// re-create the GL texture using the latest sensor model probabilities.
static bool g_update_flag ;

//--------------------------- LOCAL HELPERS -----------------------------

// Retrieve settings from calibrate_lgmd_extricate_tti section of config file
template<typename T>
static inline T conf(const std::string& key, const T& default_value)
{
   return get_conf<T>(LOBE_CALIBRATE_LET, key, default_value) ;
}

//-------------------------- INITIALIZATION -----------------------------

CalibrateLET::CalibrateLET()
   : base(clamp(conf("update_delay", 1500), 1000, 5000),
          LOBE_CALIBRATE_LET,
          conf<std::string>("geometry", "0 0 320 320"))
{
   start(LOBE_CALIBRATE_LET) ;
}

// Setup sensor models
void CalibrateLET::pre_run()
{
   g_sensor_model = &TTIEstimator::looming_sensor_model() ;
   TTIEstimator::blanking_sensor_model() ; // force creation of this object
}

//---------------------- THE BEHAVIOUR'S ACTION -------------------------

// This behaviour is actually an interactive means of generating and
// visualizing the causal probabilities used in the Bayesian
// time-to-impact sensor model. Therefore, it relies on keypress events
// to get its work done rather than regular action processing like
// "normal" behaviours.
void CalibrateLET::action(){}

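// Handle keypresses for interactive calibration: TAB switches input
// focus between the looming and blanking sensor models; '+', 'u', 'i'
// and 'k' increase the focused model's sigma by dsigma; '-', 'd' and
// 'j' decrease it by the same amount; all other keys are ignored.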
void CalibrateLET::keypress(unsigned char key)
{
   SensorModel* looming  = &TTIEstimator::looming_sensor_model()  ;
   SensorModel* blanking = &TTIEstimator::blanking_sensor_model() ;

   float dsigma = 0 ;
   switch (key)
   {
      case '\t': // toggle LOOMING/BLANKING sensor model
         viz_lock() ;
            if (g_sensor_model == looming)
               g_sensor_model = blanking ;
            else if (g_sensor_model == blanking)
               g_sensor_model = looming ;
            g_update_flag = false ;
         viz_unlock() ;
         return ;

      case 'u': // up
      case 'i': // increment
      case 'k': // go up one line (vi)
      case '+':
         dsigma = Params::dsigma() ;
         break ;

      case 'd': // down/decrement
      case 'j': // go down one line (vi)
      case '-':
         dsigma = -Params::dsigma() ;
         break ;

      default:
         return ;
   }

   g_sensor_model->update(dsigma) ;
   viz_lock() ;
      g_update_flag = true ;
   viz_unlock() ;
}

//--------------------------- VISUALIZATION -----------------------------

#ifdef INVT_HAVE_LIBGLU

namespace {

// Helper class for encapsulating the GL textures used to visualize the
// probabilities contained in the Robolocust sensor models.
class GLTexture {
   const SensorModel* m_sensor_model ;
   range<float> m_prob_range ;
   unsigned int m_name ;
   int m_width, m_height ;
public:
   GLTexture() ;
   void init(const SensorModel*) ;
   int  width()  const {return m_width  ;}
   int  height() const {return m_height ;}
   range<float> prob_range() const {return m_prob_range ;}
   void update() ;
   void render(float left, float right, float bottom, float top) const ;
   void cleanup() ;
} ;

GLTexture::GLTexture()
   : m_sensor_model(0), m_prob_range(0, 1), m_name(0), m_width(0), m_height(0)
{}

void GLTexture::init(const SensorModel* S)
{
   m_sensor_model = S ;
   m_width  = S->column_size() ;
   m_height = S->row_size() ;

   glGenTextures(1, &m_name) ;

   glBindTexture(GL_TEXTURE_2D, m_name) ;
   glPixelStorei(GL_UNPACK_ALIGNMENT, 1) ;
   glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP) ;
   glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP) ;
   glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST) ;
   glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST) ;

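   // Old-style OpenGL requires texture dimensions that are powers of
   // two, so allocate a power-of-two texture initialized to all zeros;
   // only the m_width x m_height sub-region starting at texel (0, 0)
   // will ever hold the sensor model's probability data.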
   const int W = next_power_of_two(m_width)  ;
   const int H = next_power_of_two(m_height) ;
   const int N = W * H ;
   GLubyte* texture = new GLubyte[N] ;
   std::fill_n(texture, N, GLubyte(0)) ;
   glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, W, H, 0,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, texture) ;
   delete[] texture ;

   update() ;
}

// Quick helper function object to scale sensor model's probabilities so
// that the min probability corresponds to texel value 0 and max
// probability to texel 255.
class scale {
   float min, scale_factor ;
public:
   scale(const range<float>& prob_range) ;
   GLubyte operator()(float p) const {
      return static_cast<GLubyte>((p - min) * scale_factor) ;
   }
} ;

scale::scale(const range<float>& p)
   : min(p.min()), scale_factor(255/p.size())
{}

// Re-create the GL texture using the latest sensor model probabilities
void GLTexture::update()
{
   // Make a local copy of the causal probabilities for updating the GL
   // texture.
   std::vector<float> prob = m_sensor_model->table() ;

   // Find the min and max probability values. These are used to scale
   // the sensor model's probabilities to a range suitable for
   // visualization. Without the scaling, the probabilities might be far
   // too small (e.g., on the order of 10^-8 to 10^-3), so that most of
   // the texture would end up being blank.
   m_prob_range = make_range(*(std::min_element(prob.begin(), prob.end())),
                             *(std::max_element(prob.begin(), prob.end()))) ;

   // Scale the sensor model's probabilities from the [min, max] range to
   // [0, 255] texel values so that the texture map shows something
   // discernible rather than being mostly empty/black.
   const int N = prob.size() ;
   GLubyte* texture = new GLubyte[N] ;
   std::transform(prob.begin(), prob.end(), texture, scale(m_prob_range)) ;

   // The time-to-impact versus LGMD spike rate graph is usually viewed
   // with TTI decreasing from left to right (to zero). The texture
   // created above will have TTI increase from left to right. Therefore,
   // we need to reverse its individual rows.
   for (GLubyte* t = texture; t < texture + N; t += m_width)
      std::reverse(t, t + m_width) ;

   // Finally, update GL texture object and free memory holding texture
   // data (because GL will make a copy).
   glBindTexture(GL_TEXTURE_2D, m_name) ;
   glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_width, m_height,
                   GL_LUMINANCE, GL_UNSIGNED_BYTE, texture) ;
   delete[] texture ;
}

// Render the texture representation of the sensor model
void GLTexture::render(float left, float right, float bottom, float top) const
{
   glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT) ;
   glEnable(GL_TEXTURE_2D) ;
   glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) ;
   glBindTexture(GL_TEXTURE_2D, m_name) ;

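   // Only part of the power-of-two texture holds sensor model data, so
   // compute the maximum s and t texture coordinates needed to map just
   // that sub-region onto the quad drawn below.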
   const float S = static_cast<float>(m_width) /next_power_of_two(m_width) ;
   const float T = static_cast<float>(m_height)/next_power_of_two(m_height);
   glBegin(GL_QUADS) ;
      glTexCoord2i(0, 0) ;
      glVertex2f(left, bottom) ;

      glTexCoord2f(S, 0) ;
      glVertex2f(right, bottom) ;

      glTexCoord2f(S, T) ;
      glVertex2f(right, top) ;

      glTexCoord2f(0, T) ;
      glVertex2f(left, top) ;
   glEnd() ;

   glDisable(GL_TEXTURE_2D) ;
   glPopAttrib() ;
}

// Clean-up GL texture object
void GLTexture::cleanup()
{
   glDeleteTextures(1, &m_name) ;
}

// Instantiate GL textures for the LOOMING and BLANKING phases of the
// sensor model.
static GLTexture looming_texture  ;
static GLTexture blanking_texture ;

} // end of local anonymous namespace encapsulating above helper class

// Quick helper to return a label for the current sigma value used for
// weighting the sensor model probability bins.
static std::string sigma_label(float sigma)
{
   std::ostringstream str ;
   str << "sigma: " << sigma ;
   return str.str() ;
}

// Quick helper to return a label for the current range of probability
// values used to scale the texels representing the sensor model's
// probabilities.
static std::string prob_label(const range<float>& prob_range)
{
   std::ostringstream str ;
   str << "P-range: ["
       << prob_range.min() << ", " << prob_range.max() << ']' ;
   return str.str() ;
}

void CalibrateLET::gl_init()
{
    looming_texture.init(&TTIEstimator:: looming_sensor_model()) ;
   blanking_texture.init(&TTIEstimator::blanking_sensor_model()) ;
}

// This method renders the GL textures used to visualize the sensor
// models' probability values.
void CalibrateLET::render_me()
{
   const SensorModel* looming  = &TTIEstimator::looming_sensor_model()  ;
   const SensorModel* blanking = &TTIEstimator::blanking_sensor_model() ;

   viz_lock() ;
      if (g_update_flag) {
         if (g_sensor_model == looming)
            looming_texture.update() ;
         else if (g_sensor_model == blanking)
            blanking_texture.update() ;
         g_update_flag = false ;
      }
   viz_unlock() ;

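   // The view volume is set up so that the looming sensor model's
   // texture occupies the upper half (positive y) and the blanking
   // model's texture the lower half (negative y), with a horizontal
   // line drawn at y = 0 to separate the two.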
   setup_view_volume(0, looming_texture.width(),
                     -(blanking_texture.height() + 1),
                     looming_texture.height() + 1) ;
   looming_texture.render(0, looming_texture.width()  - 1,
                          1, looming_texture.height() - 1) ;
   blanking_texture.render(0, blanking_texture.width() - 1,
                           -blanking_texture.height() + 1, -1) ;

   glColor3f(1, 1, 0) ;
   glBegin(GL_LINES) ;
      glVertex2i(0, 0) ;
      glVertex2i(looming_texture.width(), 0) ;
   glEnd() ;

   glMatrixMode(GL_PROJECTION) ;
   glPopMatrix() ;
   glMatrixMode(GL_MODELVIEW) ;
   glPopMatrix() ;

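   // Overlay text labels showing each sensor model's name, current sigma
   // and probability range; the model that currently has the input focus
   // is labeled in red, the other in grey.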
   text_view_volume() ;
      if (g_sensor_model == looming)
         glColor3f(1, 0, 0) ;
      else
         glColor3f(0.5f, 0.5f, 0.5f) ;
      draw_label(3, 14, looming->name().c_str()) ;
      draw_label(3, 26, sigma_label(looming->sigma()).c_str()) ;
      draw_label(3, 38, prob_label(looming_texture.prob_range()).c_str()) ;

      const int y = m_geometry.height - 26 ;
      if (g_sensor_model == blanking)
         glColor3f(1, 0, 0) ;
      else
         glColor3f(0.5f, 0.5f, 0.5f) ;
      draw_label(3, y, blanking->name().c_str()) ;
      draw_label(3, y + 10, sigma_label(blanking->sigma()).c_str()) ;
      draw_label(3, y + 20, prob_label(blanking_texture.prob_range()).c_str());
   restore_view_volume() ;
}

void CalibrateLET::gl_cleanup()
{
    looming_texture.cleanup() ;
   blanking_texture.cleanup() ;
}

#endif

//----------------------------- CLEAN-UP --------------------------------

CalibrateLET::~CalibrateLET(){}

//-------------------------- KNOB TWIDDLING -----------------------------

// Parameters initialization
CalibrateLET::Params::Params()
   : m_dsigma(clamp(conf("dsigma", 0.01f), 0.001f, 100.00f))
{}

// Parameters clean-up
CalibrateLET::Params::~Params(){}

//-----------------------------------------------------------------------

} // end of namespace encapsulating this file's definitions

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */