beobot-GSnav-def.H

Go to the documentation of this file.
00001 /*!@file Beobot/beobot-GSnav-def.H
00002   Defined values for robot navigation using saliency and gist.
00003   Run beobot-GSnav-master at CPU_A to run Gist-Saliency model
00004   Run beobot-GSnav        at CPU_B to run SIFT object recognition       */
00005 // //////////////////////////////////////////////////////////////////// //
00006 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00007 // University of Southern California (USC) and the iLab at USC.         //
00008 // See http://iLab.usc.edu for information about this project.          //
00009 // //////////////////////////////////////////////////////////////////// //
00010 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00011 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00012 // in Visual Environments, and Applications'' by Christof Koch and      //
00013 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00014 // pending; application number 09/912,225 filed July 23, 2001; see      //
00015 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00016 // //////////////////////////////////////////////////////////////////// //
00017 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00018 //                                                                      //
00019 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00020 // redistribute it and/or modify it under the terms of the GNU General  //
00021 // Public License as published by the Free Software Foundation; either  //
00022 // version 2 of the License, or (at your option) any later version.     //
00023 //                                                                      //
00024 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00025 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00026 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00027 // PURPOSE.  See the GNU General Public License for more details.       //
00028 //                                                                      //
00029 // You should have received a copy of the GNU General Public License    //
00030 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00031 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00032 // Boston, MA 02111-1307 USA.                                           //
00033 // //////////////////////////////////////////////////////////////////// //
00034 //
00035 // Primary maintainer for this file: Christian Siagian <siagian@usc.edu>
00036 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Beobot/beobot-GSnav-def.H $
00037 // $Id: beobot-GSnav-def.H 12552 2010-01-13 23:05:00Z siagian $
00038 //
00039 /////////////////////////////////////////////////////////////////////////
00040 //
00041 // This is an on-going project for biologically-plausible
00042 // mobile-robotics navigation.
00043 // It accepts any input: a video clip <input.mpg>, a camera feed, or frames.
00044 //
00045 // The system uses Gist to recognize places and saliency
00046 // to get better localization within the place.
00047 // The program is also made to be streamlined for fast processing using
00048 // parallel computation. That is, the V1 features of different channels are
00049 // computed in parallel.
00050 //
00051 // Currently it is able to recognize places through the use of gist features.
00052 // The place classifier uses a neural network,
00053 // passed in a form of <input_train.txt> -
00054 // the same file is used in the training phase by train-FFN.C.
00055 //
00056 // Related files of interest: GistEstimator.C (and .H) and
00057 // GistEstimatorConfigurator.C (and .H) used by Brain.C to compute
00058 // the gist features.
00059 // test-Gist.C uses GistEstimator to extract gist features from an image.
00060 //
00061 // In parallel we use saliency to get a better spatial resolution
00062 // as well as better place accuracy. The saliency model is used to obtain
00063 // salient locations. We then use ShapeEstimator algorithm to segment out
00064 // the sub-region to get a landmark. Using SIFT we can identify the object,
00065 // create a database, etc.
00066 //
00067 // For localization and path planning, we perform landmark-hopping
00068 // to get to the final destination.
00069 
00070 #ifndef BEOBOT_BEOBOT_GSNAV_DEF_DEFINED
00071 #define BEOBOT_BEOBOT_GSNAV_DEF_DEFINED
00072 
00073 #define VENTRAL_NODE         0
00074 #define DORSAL_NODE          1
00075 
00076 #define INIT_COMM            10000
00077 #define INIT_DONE            10001
00078 #define SEARCH_LM            10002
00079 #define SEARCH_LM_RES        10003
00080 #define TRACK_LM             10004
00081 #define TRACK_LM_RES         10005
00082 #define ABORT                10006
00083 
00084 #define TRAIN_MODE           20000
00085 #define TRAIN_X_MODE         20001
00086 #define TEST_MODE            20002
00087 
00088 #define SEARCH_NOT_DONE      30000
00089 #define LOCALIZED            30001
00090 #define NOT_LOCALIZED        30002
00091 
00092 #define BC_NO_SIGNAL         40000
00093 #define BC_QUIT_SIGNAL       40001
00094 
00095 #define FILE_INPUT           50000
00096 #define CAMERA_INPUT         50001
00097 
00098 //! percentage of images that a landmark has to be matched
00099 //! so that we can combine it with the other landmark
00100 #define NMATCH_THRESHOLD    0.25f
00101 
00102 //! number of frames over which frame rate is computed
00103 #define NAVG 20
00104 
00105 //! Factor to display the sm values as greyscale:
00106 #define SMFAC 0.05F
00107 
00108 //! amount of time allotted to search through landmark db
00109 #define SEARCH_TIME_LIMIT    15 //!< 15 frames
00110 
00111 #endif
00112 
00113 // ######################################################################
00114 /* So things look consistent in everyone's emacs... */
00115 /* Local Variables: */
00116 /* indent-tabs-mode: nil */
00117 /* End: */
Generated on Sun May 8 08:40:11 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3