
/**
   \file  Robots/LoBot/slam/LoParticle.C
   \brief This file defines the non-inline member functions of the
   lobot::Particle class.
*/

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: mviswana usc edu
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/LoBot/slam/LoParticle.C $
// $Id: LoParticle.C 13628 2010-06-28 23:48:02Z mviswana $
//

//------------------------------ HEADERS --------------------------------

// lobot headers
#include "Robots/LoBot/slam/LoParticle.H"
#include "Robots/LoBot/slam/LoMap.H"
#include "Robots/LoBot/slam/LoCoords.H"
#include "Robots/LoBot/slam/LoSlamParams.H"

#include "Robots/LoBot/config/LoConfigHelpers.H"

#include "Robots/LoBot/misc/LoClipper.H"
#include "Robots/LoBot/misc/LoLineRasterizer.H"
#include "Robots/LoBot/misc/LoExcept.H"
#include "Robots/LoBot/misc/singleton.hh"

#include "Robots/LoBot/util/LoMath.H"
#include "Robots/LoBot/util/triple.hh"

// INVT utilities
#include "Util/log.H"

// Standard C++ headers
#include <numeric>
#include <algorithm>
#include <utility>

//----------------------------- NAMESPACE -------------------------------

namespace lobot {

//-------------------------- INITIALIZATION -----------------------------

Particle::
Particle(float initial_weight, const std::vector<int>& range_readings)
   : m_pose(SlamParams::initial_pose()),
     m_map(new OccGrid()),
     m_ref_scan(m_pose, range_readings),
     m_weight(initial_weight)
{
   m_this = reinterpret_cast<unsigned long>(this) ;
}

Particle::
Particle(float initial_weight, const std::vector<int>& range_readings,
         const boost::shared_ptr<OccGrid>& known_map)
   : m_pose(SlamParams::initial_pose()),
     m_map(known_map),
     m_ref_scan(m_pose, range_readings),
     m_weight(initial_weight)
{
   m_this = reinterpret_cast<unsigned long>(this) ;
}

Particle::Particle(const Particle& that)
   : m_pose(that.m_pose),
     m_map(SlamParams::localization_mode() ? that.m_map
                                           : MapPtr(new OccGrid(*that.m_map))),
     m_ref_scan(that.m_ref_scan),
     m_weight(that.m_weight)
{
   m_this = reinterpret_cast<unsigned long>(this) ; // remember: this != that
}

Particle& Particle::operator=(const Particle& that)
{
   if (this != &that)
   {
      m_pose = that.m_pose ;
      m_map  = that.m_map ;
      m_ref_scan = that.m_ref_scan ;
      m_weight   = that.m_weight ;
      m_this = reinterpret_cast<unsigned long>(this) ; // remember: this != that
   }
   return *this ;
}

// Randomize this particle's pose estimate; useful to help deal with
// filter mislocalizations.
void Particle::randomize()
{
   /*
   LERROR("old pose:   [%08lX] %10.2f %10.2f %6.1f",
          m_this, m_pose.x(), m_pose.y(), m_pose.theta()) ;
   // */

   float L, R, B, T ;
   SlamParams::map_extents(&L, &R, &B, &T) ;

   m_pose.x(randomf(L, R)) ;
   m_pose.y(randomf(B, T)) ;
   m_pose.t(clamp_angle(randomf(0, 360))) ;

   /*
   LERROR("rnd pose:   [%08lX] %10.2f %10.2f %6.1f",
          m_this, m_pose.x(), m_pose.y(), m_pose.theta()) ;
   // */
}

//--------------------------- MOTION MODEL ------------------------------

namespace {

// This function computes a new pose from the given pose and odometry
// data.
//
// NOTE: Raw odometry will simply inform us about the net displacement
// and rotation. In actuality, the robot might well have followed a
// bizarrely tortuous/circuitous route to get to its new location and
// bearing. However, there is no (easy/straightforward) way to know or
// model such an occurrence.
//
// Nonetheless, if the robot's odometry is checked with reasonable
// frequency, we can safely assume that the path followed by the robot as
// indicated by raw odometry ought to be along a short circular arc,
// which is much easier to model.
//
// But something even easier to work with is simple linear motion. That
// is, if we use raw odometry in short, frequent bursts, we can just
// approximate the motion as an in-place rotation at the current location
// so as to face the destination and then a straight-line translation to
// the new position.
//
// A slight improvement to the above "point-and-shoot" approach is to
// break up the overall circular motion into several piecewise linear
// steps. Thus, we start off in the robot's original heading, move a
// short distance, turn a little, move a little, etc. till we get to the
// final position and heading.
//
// Here, we choose to perform the above piecewise linear motion in three
// steps. First, we move by a third of the total displacement reported by
// odometry. Then, we rotate by half the amount reported by odometry and
// travel straight for another third of the total displacement. Finally,
// we rotate the remaining half angle and travel the final third of the
// way to get to the final position.
//
// Although this approach is crude, the probabilistic framework of the
// FastSLAM particle filter should still be robust enough to work without
// too much degradation in the face of this approximation.
//
// (A worked numeric example of this three-step decomposition follows
// the function definition below.)
Pose operator+(const Pose& p, const Odometry& od)
{
   float d = od.displacement()/3.0f ;
   float r = od.rotation()/2.0f ;

   // STEP 1: Travel 1/3 of total displacement along current heading
   float t = p.theta() ;
   float x = p.x() + d * cos(t) ;
   float y = p.y() + d * sin(t) ;

   // STEP 2: Rotate 1/2 total rotation and travel another third of the way
   t += r ;
   x += d * cos(t) ;
   y += d * sin(t) ;

   // STEP 3: Rotate and travel remaining amount
   t += r ;
   x += d * cos(t) ;
   y += d * sin(t) ;

   return Pose(x, y, t) ;
}
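
// A worked numeric example of the above decomposition (the odometry
// values here are invented purely for illustration; angles are in
// degrees, which the clamp_angle(randomf(0, 360)) usage elsewhere in
// this file suggests is what lobot's cos/sin expect):
//
//    Starting pose (0, 0, 0); odometry reports a displacement of 300
//    and a rotation of 30. Thus d = 100 and r = 15, giving:
//
//       STEP 1: t =  0 ==> (x, y) = (100.00,  0.00)
//       STEP 2: t = 15 ==> (x, y) = (196.59, 25.88)
//       STEP 3: t = 30 ==> (x, y) = (283.19, 75.88)
//
//    The true circular arc of length 300 subtending 30 degrees ends at
//    roughly (286.48, 76.76), so the three-step approximation lands
//    quite close, whereas a single rotate-then-translate step would
//    have ended at (259.81, 150.00).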

} // end of local anonymous namespace encapsulating above helpers

// The motion model uses the current control input and produces a new
// state from the current state according to P(xt|xt-1,ut).
//
// NOTE: The raw odometry returned by the robot's lower layers does not
// serve directly as the current control input. Instead, we use the
// latest laser range finder measurements to correct the raw odometry by
// applying the Lu-Milios scan matching algorithm and then use its
// resulting transformation as the control input (viz., ut).
//
// This approach allows us to drastically reduce the number of particles
// required by the grid-based FastSLAM algorithm, which is imperative
// because it requires each particle to carry its own copy of the
// occupancy map. See "An Efficient FastSLAM Algorithm for Generating
// Maps of Large-Scale Cyclic Environments from Raw Laser Range
// Measurements" by Hahnel, Burgard, Fox and Thrun, IROS 2003.
//
// NOTE 2: The Robolocust implementation of the Lu-Milios IDC algorithm
// does not converge (circa May 2010)! As a result, it produces bizarre
// "corrections," which, in fact, are far worse than using raw odometry.
// Therefore, for now, as a workaround, we use a config flag to turn scan
// matching off.
void
Particle::
apply_motion_model(const Odometry& odometry, const std::vector<int>& lrf)
{
   // First, use raw odometry to produce an estimate of the new pose
   Pose new_pose = m_pose + odometry ;
   /*
   LERROR("old pose:   [%08lX] %10.2f %10.2f %6.1f",
          m_this, m_pose.x(), m_pose.y(), m_pose.theta()) ;
   LERROR("odometry:   [%08lX] %7d %18d",
          m_this, odometry.displacement(), odometry.rotation()) ;
   LERROR("new pose:   [%08lX] %10.2f %10.2f %6.1f",
          m_this, new_pose.x(), new_pose.y(), new_pose.theta()) ;
   // */

   // Then, if scan matching is enabled, correct the raw odometry and use
   // the corrected values and suitable noise to perturb the particle.
   float L, R, B, T ;
   SlamParams::map_extents(&L, &R, &B, &T) ;
   const float x_noise = SlamParams::x_error() ;
   const float y_noise = SlamParams::y_error() ;
   const float t_noise = SlamParams::t_error() ;
   if (SlamParams::match_scans())
   {
      throw misc_error(BROKEN_FEATURE) ;

      Scan new_scan(new_pose, lrf) ;
      Transformation ut = match_scans(new_scan, m_ref_scan) ;
      m_ref_scan = new_scan ;
      /*
      LERROR("scan match: [%08lX] %10.2f %10.2f %6.1f",
             m_this, ut.Tx(), ut.Ty(), ut.w()) ;
      // */

      m_pose.dx(-ut.Tx() + sample_tri(x_noise)) ;
      m_pose.dy(-ut.Ty() + sample_tri(y_noise)) ;
      m_pose.dt(-ut.w()  + sample_tri(t_noise)) ;
   }
   else // scan matching off; therefore, produce new
   {    // state estimate using raw odometry and noise
      Pose p(new_pose) ;
      for (int i = 0; i < 10; ++i)
      {
         p.x(clamp(new_pose.x() + sample_tri(x_noise), L, R)) ;
         p.y(clamp(new_pose.y() + sample_tri(y_noise), B, T)) ;
         p.t(new_pose.t() + sample_tri(t_noise)) ;

         // Ensure that new state obtained by applying odometry + noise
         // ends up somewhere reasonable, i.e., in a map cell that is not
         // occupied by some obstacle. This ought to improve the proposal
         // distribution, especially near obstacles.
         //
         // NOTE: We could end up testing the newly sampled poses against
         // the occupancy map forever. So we test this a maximum of ten
         // times (or some other reasonable, small number).
         int x, y ;
         Coords::to_grid(p.x(), p.y(), &x, &y) ;
         if (m_map->is_vacant(x, y))
            break ;
      }
      m_pose = p ;
   }
   /*
   LERROR("final pose: [%08lX] %10.2f %10.2f %6.1f",
          m_this, m_pose.x(), m_pose.y(), m_pose.theta()) ;
   // */
}
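
// NOTE: sample_tri() comes from LoMath and is taken here to return a
// zero-mean sample from a triangular distribution whose spread is
// governed by the supplied error bound (an assumption inferred from its
// usage above; see LoMath.H for the actual definition). If such a
// sampler were not already available, a minimal sketch in the spirit of
// the sampling routines in chapter 5 of the Thrun book would be:
//
//    float sample_tri_sketch(float b)
//    {
//       // The sum of two independent uniform variates on [-b, b] has a
//       // triangular density on [-2b, 2b]; halving the sum confines the
//       // noise sample to [-b, b] while retaining the triangular shape.
//       return 0.5f * (randomf(-b, b) + randomf(-b, b)) ;
//    }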

//--------------------------- SENSOR MODEL ------------------------------

namespace {

// This function object implements the beam model described in Chapter 6
// (section 6.3) of Thrun, Burgard and Fox's "Probabilistic Robotics."
// Specifically, it implements the loop body of the algorithm presented
// in table 6.1 on page 158.
class apply_beam_model {
   const   OccGrid& m_map   ;
   const   Pose&    m_pose  ;
   const   unsigned long m_this ; // for debugging
   const   Clipper  m_clipper   ;
   const   int      m_step  ;
   int m_angle ;
   int m_obx, m_oby ;

public:
   apply_beam_model(const OccGrid&, const Pose&, unsigned long particle_addr) ;
   float operator()(float q, int z) ;

private:
   float cast_ray(float angle) ;
   float cast_ray(const float P[4]) ;
   float prob_exp(float z_expected, float z_actual) const ;

   // This is a helper function object for checking the occupancy grid's
   // cells to see if they are occupied or not. It is used in conjunction
   // with the ray casting operation to find the nearest obstacle
   // in the direction of a range measurement's beam.
   class check_occupancy {
      apply_beam_model& outer ;
   public:
      check_occupancy(apply_beam_model&) ;
      bool operator()(int x, int y) ;
   } ;

   // The above inner class will need access to the apply_beam_model
   // object's innards (so that it can set the m_obx and m_oby fields
   // when it finds an obstacle).
   friend class check_occupancy ;
} ;

apply_beam_model::
apply_beam_model(const OccGrid& m, const Pose& p, unsigned long particle_addr)
   : m_map(m), m_pose(p), m_this(particle_addr),
     m_clipper(SlamParams::map_extents()),
     m_step(SlamParams::beam_step()), m_angle(SlamParams::beam_start()),
     m_obx(-1), m_oby(-1)
{}

apply_beam_model::check_occupancy::check_occupancy(apply_beam_model& m)
   : outer(m)
{}

// This is the actual function that implements the loop body of algorithm
// beam_range_finder_model on page 158 of "Probabilistic Robotics" by
// Thrun, Burgard and Fox. The basic idea is to compute the likelihood of
// seeing the current measurement, zt, given the current state, xt, and
// the current map, mt; in other words: P(zt|xt,mt).
//
// Since each scan of a laser range finder contains several distance
// measurements, we must evaluate P(zt|xt,mt) for each of these readings.
// The final value of P(zt|xt,mt), viz., q, is obtained by multiplying
// the probabilities for the individual readings.
//
// This function computes the p term shown in table 6.1, page 158 of the
// Thrun book and then returns a "running total" of q by taking the
// product of p and the value of q so far. Thus, this function object is
// meant to be used in conjunction with the STL accumulate algorithm.
//
// NOTE: The beam model described in the Thrun book makes use of four
// components, viz., p_hit, p_short, p_max and p_rnd. Here, we only use
// p_hit, which we refer to as p_exp, and p_rnd. The basic idea here is
// to compute a reasonable number that can be used to weight a particle's
// estimate of the robot's pose and occupancy map. Therefore, we don't
// need a very detailed model; only something that is good enough to
// produce good relative weighting factors.
//
// When a particle has high weight, it means that its state estimate
// matches the current sensor data and it should be retained by the
// filter's resampling step. A particle with low weight, on the other
// hand, implies a poor state estimate and would be a candidate for
// culling from the particle population.
//
// To fulfill this weighting requirement, we simply check the current
// measurement against the particle's map and pose by computing an
// expected range reading and seeing how closely the actual measurement
// matches the expected one. The p_exp component of the probability
// computation takes care of rating the particle's state estimate based
// on the actual measurement.
//
// The p_rnd component takes care of accounting for all sorts of effects
// that cannot or need not be explicitly modeled. Combining these two
// will yield the necessary beam probability, which more than suffices
// for weighting particles. There is no need for additional complexities
// such as p_short and weighting the individual components, etc.; they
// simply don't add enough value to warrant their use.
float apply_beam_model::operator()(float q, int z)
{
   if (z < 0)    // bad distance reading from LRF
      return q ; // discard it from beam model's probability computations

   // If we have a good distance reading, then compute expected range by
   // ray casting in the map along z's direction and seeing where the
   // nearest obstacle lies.
   float z_exp = cast_ray(m_pose.theta() + m_angle) ;
   /*
   if (z_exp > 0) {
      LERROR("range:  [%08lX] {%4d} z_exp = %6.1f, z_act = %4d",
             m_this, m_angle, z_exp, z) ;
   }
   // */

   // Now, figure out how far off the mark the actual measurement is by
   // applying a Gaussian centered at the expected range. If a good
   // expected range could not be computed (because perhaps the map has
   // not yet congealed), then we set this component of the beam
   // probability to zero.
   float p_exp = (z_exp < 0) ? 0
                             : gaussian(z, z_exp, SlamParams::beam_sigma()) ;

   // The actual measurement may also be off the mark due to various
   // random effects.
   //
   // NOTE: Random effects are modeled as a uniform distribution spanning
   // the LRF measurement range. Therefore, this likelihood is a constant
   // and can be computed by and stored in the parameters structure.
   float p_rnd = SlamParams::beam_prob_rnd() ;

   // Overall likelihood of seeing current measurement given newly
   // computed state (from motion model) and most recent occupancy map
   // will be a combination of the above two probabilities.
   float p = p_exp + p_rnd ;
   /*
   if (p_exp > 0) {
      LERROR("P_exp:  [%08lX] {%4d} %10.8f", m_this, m_angle, p_exp) ;
      LERROR("P_rnd:  [%08lX] {%4d} %10.8f", m_this, m_angle, p_rnd) ;
      LERROR("P_tot:  [%08lX] {%4d} %10.8f", m_this, m_angle, p) ;
   }
   // */

   // Setup for next distance reading's ray casting...
   m_angle += m_step ;

   // Finally, combine this beam's likelihood value with that of the
   // other beams...
   //
   // NOTE: This step corresponds to the q = q.p line in table 6.1, page
   // 158 of the Thrun book. This function returns the RHS of the above
   // expression. The LHS is put together by the STL accumulate
   // algorithm, which this helper function object is passed to in
   // Particle::apply_sensor_model() defined below.
   //
   // NOTE: To work around floating point instabilities associated with
   // the tiny numbers that result from multiplying several [0,1]
   // probabilities, we apply a fudge factor to scale the individual beam
   // probabilities to > 1. The particle filter's normalization step
   // should bring the particle weights back down to [0,1].
   return q * p * SlamParams::beam_fudge() ;
}
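
// To put rough numbers on the two components above (all values invented
// for illustration, and assuming gaussian() evaluates a normalized
// Gaussian density): suppose a beam's expected range comes out to
// z_exp = 2000mm, the LRF reports z = 2100mm, and beam_sigma() is
// configured as 150mm. Then:
//
//    p_exp = gaussian(2100, 2000, 150) ~ 0.0021
//    p_rnd = 1/(measurement range), e.g., 1/30000 ~ 0.00003
//    p     = p_exp + p_rnd            ~ 0.0021
//
// Multiplying a few dozen such p values together rapidly underflows
// single precision floats, which is exactly why the fudge factor is
// applied to each beam's contribution before it enters the running
// product q.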

// This function casts a ray from the robot's current position within the
// map along the specified angular direction. When it encounters an
// obstacle along the ray, i.e., an occupancy grid cell with high
// likelihood of containing some object, it returns the distance between
// that cell and the robot's current position as the expected range
// reading. The discrepancy between the expected and actual range
// readings is then used in the P(zt|xt,mt) computation.
//
// Since ray casting uses a line rasterization algorithm, we need to
// determine the ray's start and end points. As mentioned above, the
// start point is the robot's current position. To find the end point, we
// simply multiply the ray's direction vector by a large distance and
// then we clip the resulting line to the map's boundaries to ensure that
// the ray doesn't end up exploring an area outside of the map.
float apply_beam_model::cast_ray(float angle)
{
   const float D = SlamParams::max_map_distance() ;
   float ray_end_points[] = {
      m_pose.x(), m_pose.y(),
      m_pose.x() + D * cos(angle), m_pose.y() + D * sin(angle),
   } ;
   float new_end_points[4] = {0} ;
   switch (m_clipper.clip(ray_end_points, new_end_points))
   {
      case Clipper::COMPLETELY_INSIDE:
      case Clipper::SECOND_POINT_CLIPPED:
         /*
         LERROR("ray re: [%08lX] {%6.1f} (%.2f, %.2f) (%.2f, %.2f)",
                m_this, angle,
                new_end_points[0], new_end_points[1],
                new_end_points[2], new_end_points[3]) ;
         // */
         return cast_ray(new_end_points) ;

      case Clipper::COMPLETELY_OUTSIDE:   // robot outside map!?!
      case Clipper::FIRST_POINT_CLIPPED:
      case Clipper::BOTH_POINTS_CLIPPED:
      default: // Clipper has returned total crap!
         return -1 ; // discard this ray
   }
}

// The previous function's main task is to ensure that the ray casting
// operation stays within the map's bounds. The following function is the
// one that actually performs the ray casting and expected range
// computation given the ray's end points.
//
// As we walk along the ray, if we find no obstacle at all, then we
// return -1 to indicate that no good estimate could be made of the
// expected range in the given ray's direction. But if we do find an
// obstacle, then the expected range is calculated as the Euclidean
// distance between the obstacle's location and the robot's position.
float apply_beam_model::cast_ray(const float P[4])
{
   // First, convert the ray's end points from "real space" into "grid
   // space." This allows us to perform the line rasterization part of
   // ray casting entirely with integer coordinates. And, in any case, we
   // need to perform ray casting in grid coordinates because we want to
   // find the grid cells that intersect with the ray.
   int x0, y0, x1, y1 ;
   Coords::to_grid(P[0], P[1], &x0, &y0) ;
   Coords::to_grid(P[2], P[3], &x1, &y1) ;
   //LERROR("ray gr: [%08lX] (%4d, %4d) (%4d, %4d)", m_this, x0, y0, x1, y1) ;

   // Once we have the ray's end points in grid coordinates, we rasterize
   // the line joining these two points to find the grid cells
   // intersected by the range reading. The rasterizer will invoke
   // check_occupancy's operator()(int, int) method (defined below),
   // which will mark the nearest obstacle's location when it finds that
   // spot.
   //
   // Since grid coordinates go from zero to W-1 or H-1 (where W and H
   // are the width and height of the occupancy grid), we use (-1,-1) to
   // indicate the lack of an obstacle along the ray. When the rasterizer
   // is done, if the obstacle location is still (-1,-1), then there is
   // no obstacle in the ray's direction.
   m_obx = m_oby = -1 ;
   rasterize_line(x0, y0, x1, y1, check_occupancy(*this)) ;
   //LERROR("ob.loc: [%08lX] (%4d,%4d) [grid coords]", m_this, m_obx, m_oby) ;
   if (m_obx < 0 || m_oby < 0) // no obstacle along this ray
      return -1 ;

   // If the above ray casting operation did find an obstacle, we convert
   // that cell's coordinates to real/physical coordinates and then
   // return the expected range as the Euclidean distance between the
   // obstacle's location and the robot's current position.
   float ox, oy ;
   Coords::to_real(m_obx, m_oby, &ox, &oy) ;
   //LERROR("ob.loc: [%08lX] (%10.2f,%10.2f) [real coords]", m_this, ox, oy) ;
   return std::min(sqrtf(sqr(ox - P[0]) + sqr(oy - P[1])),
                   SlamParams::beam_range().max()) ;
}

// To find the expected range reading in some direction, we cast a ray
// along that direction from the robot's current position in the map and
// look at the cells in the occupancy grid intersected by that ray. When
// the probability in a cell is above some predefined threshold, we mark
// that spot as the location of the nearest obstacle in the ray's
// direction.
//
// The ray casting operation is implemented using Bresenham's line
// rasterization algorithm, which enumerates the pixel (i.e., occupancy
// grid) positions starting at the robot's current location and ending at
// the cell where the range reading terminates. The Robolocust
// implementation of Bresenham's algorithm uses a callback function
// object for each pixel on the line joining two points.
//
// apply_beam_model::check_occupancy serves as the function object for
// the line rasterizer. In particular, the following member function is
// the one called by the rasterizer. As mentioned above, each time it is
// called, it will be passed the coordinates of the current pixel (i.e.,
// occupancy grid cell), under consideration. To do its thing, it checks
// the probability value of that cell against a preconfigured threshold
// and then marks that spot as the location of the nearest obstacle if
// the threshold condition is satisfied.
//
// Furthermore, the Robolocust implementation of Bresenham's line
// rasterization algorithm features an early exit test. Once we've found
// the location of the nearest obstacle along the ray being cast, there
// is no need to check the remaining occupancy grid cells in that ray's
// direction. Therefore, after marking the spot, we can tell the
// rasterizer to stop the ray casting.
//
// DEVNOTE: check_occupancy is an inner, friend class of
// apply_beam_model. check_occupancy::outer is a reference to the
// apply_beam_model instance that creates and passes this function object
// to the line rasterizer. We need this indirect approach because
// lobot::rasterize_line() is a template function that takes a visitor
// function for triggering its set_pixel callbacks.
//
// This callback can be a regular function pointer or a function object.
// In case of the latter, that object will be passed by value.
// Consequently, the state changes recorded by lobot::rasterize_line()'s
// local copy of the function object will not be reflected in the line
// rasterization function's caller.
//
// In this particular case, that means the obstacle location will not get
// passed back here to apply_beam_model. Therefore, we need the
// check_occupancy function object to hold a reference to the relevant
// data members of apply_beam_model. But instead of holding a reference
// to just those data members we need here, it is just simpler to
// reference the entire apply_beam_model object and use whatever of its
// members we require.
bool apply_beam_model::check_occupancy::operator()(int x, int y)
{
   /*
   float occ = outer.m_map.get(x, y) ;
   LERROR("oc.chk: [%08lX] (%4d,%4d) = %11.8f [%10.8f]",
          outer.m_this, x, y, occ, log_odds_to_prob(occ)) ;
   // */
   if (outer.m_map.is_occupied(x, y)) {
      outer.m_obx = x ;
      outer.m_oby = y ;
      return false ; // found nearest obstacle ==> stop ray casting
   }
   return true ;
}
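
// For reference, the visitor contract described in the comments above
// can be captured by the following sketch of a callback-driven Bresenham
// rasterizer (a sketch only, not the actual Robolocust implementation,
// which lives in LoLineRasterizer.H; rasterize_line_sketch is a
// hypothetical name used purely for illustration):
//
//    template<typename visitor>
//    void rasterize_line_sketch(int x0, int y0, int x1, int y1, visitor v)
//    {
//       const int dx =  abs(x1 - x0), sx = (x0 < x1) ? 1 : -1 ;
//       const int dy = -abs(y1 - y0), sy = (y0 < y1) ? 1 : -1 ;
//       int err = dx + dy ;
//       for (;;)
//       {
//          if (! v(x0, y0))  // visitor returns false ==> early exit
//             return ;
//          if (x0 == x1 && y0 == y1) // reached the end of the line
//             return ;
//          int e2 = 2 * err ; // advance to the next pixel on the line
//          if (e2 >= dy) { err += dy ; x0 += sx ; }
//          if (e2 <= dx) { err += dx ; y0 += sy ; }
//       }
//    }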

} // end of local anonymous namespace encapsulating above helpers

// The sensor model uses the latest observation to correct the state
// prediction made by the motion model according to P(xt|zt). Since we're
// doing SLAM, the state is also contingent upon a map, i.e., the
// filter's correction step has to gauge P(xt|zt,mt).
//
// In accordance with Bayes' Rule, we evaluate the diagnostic probability
// P(xt|zt,mt) by using the more easily available causal information,
// viz., P(zt|xt,mt). To figure out P(zt|xt,mt), we apply a beam model to
// the laser range finder that computes the required probability by
// seeing how closely the actual sensor data matches the expected range
// measurements as per the particle's pose and map.
//
// The above model is described in Chapter 6, section 3 of "Probabilistic
// Robotics" by Thrun, Burgard and Fox. This function implements the
// algorithm beam_range_finder_model shown in table 6.1, page 158.
// Instead of an explicit loop as shown in the book, we use the STL
// accumulate algorithm to compute q, which is the desired causal
// probability, viz., P(zt|xt,mt). This probability is then used to
// weight each particle in the correction step of the FastSLAM particle
// filter.
//
// NOTE: When we multiply the probabilities for individual beams to get
// the final P(z|x,m) value, the number will be extremely small (e.g., in
// the range 10^-50 to 10^-80). Such numbers result in numerical
// instabilities (regardless of floating point precision). To work around
// this problem, we multiply the individual beam probabilities by a large
// constant. Consequently, the particle weight will not generally lie in
// the range [0,1]. The filter's normalization step should take care of
// bringing the particle weights back to the usual [0,1] range.
void Particle::apply_sensor_model(const std::vector<int>& zt)
{
   //LERROR("wt.be4: [%08lX] %14.8e (%10.8e)", m_this, m_weight, m_weight) ;
   m_weight = std::accumulate(zt.begin(), zt.end(), 1.0f,
                              apply_beam_model(*m_map, m_pose, m_this)) ;
   //LERROR("wt.aft: [%08lX] %14.8e", m_this, m_weight) ;
}
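
// The methods defined so far are only the per-particle pieces of
// FastSLAM; the owning filter is what sequences them. The following
// commented sketch shows one plausible shape of that update step (an
// illustration only: the actual driver lives elsewhere in the Robolocust
// sources, and the normalize/resample helpers named here are
// assumptions, not part of this file's API):
//
//    void update_sketch(std::vector<Particle>&  particles,
//                       const Odometry&         control,
//                       const std::vector<int>& scan)
//    {
//       for (unsigned int i = 0; i < particles.size(); ++i)
//       {
//          particles[i].apply_motion_model(control, scan) ; // predict
//          particles[i].apply_sensor_model(scan) ;          // weight
//          particles[i].update_map(scan) ;                  // map update
//       }
//       // normalize_weights(particles) ; // bring weights back to [0,1]
//       // resample(particles) ;          // cull low-weight particles
//    }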

//---------------------------- MAP UPDATES ------------------------------

namespace {

// A helper function object for applying the inverse measurement model
// described in Chapter 9, section 2 of "Probabilistic Robotics" by
// Thrun, Burgard and Fox.
class apply_inverse_range_sensor_model {
   OccGrid&      m_map  ;
   const Pose&   m_pose ;
   unsigned long m_this ; // for debugging
   const    int  m_step ;
   const   float m_ray_delta, m_ray_diagonal_delta ;
   int   m_angle ;
   int   m_prev_x, m_prev_y ;
   float m_range_reading, m_ray_range ;

public:
   apply_inverse_range_sensor_model(OccGrid&, const Pose&, unsigned long) ;
   void operator()(int z) ;

private:
   // This is a helper function object for updating the occupancy grid's
   // cells. It is used in conjunction with the ray casting operation to
   // mark the cells at the end of a range measurement's beam as occupied
   // and the intermediate ones as vacant.
   class update_occupancy {
      apply_inverse_range_sensor_model& outer ;
   public:
      update_occupancy(apply_inverse_range_sensor_model&) ;
      bool operator()(int x, int y) ;
   } ;

   // The above inner class will need access to the
   // apply_inverse_range_sensor_model object's innards (so that it can
   // update the occupancy grid and track/set other relevant data members
   // during the ray casting).
   friend class update_occupancy ;
} ;

apply_inverse_range_sensor_model::
apply_inverse_range_sensor_model(OccGrid& M, const Pose& P, unsigned long addr)
   : m_map(M), m_pose(P), m_this(addr),
     m_step(SlamParams::beam_step()),
     m_ray_delta(SlamParams::map_cell_size()),
     m_ray_diagonal_delta(1.41421356f /*sqrt(2)*/ * m_ray_delta),
     m_angle(SlamParams::beam_start()),
     m_prev_x(-1), m_prev_y(-1),
     m_range_reading(-1), m_ray_range(-1)
{}

apply_inverse_range_sensor_model::
update_occupancy::update_occupancy(apply_inverse_range_sensor_model& m)
   : outer(m)
{}

// This function implements a variation of algorithm
// occupancy_grid_mapping shown in table 9.1, page 286 of the Thrun book.
// Instead of looping over all the cells in the occupancy grid and
// checking if they are in the perceptual field of the laser range
// finder's range readings, we take each reading one at a time and cast a
// ray along its direction. All the cells that intersect this ray will be
// in the perceptual field of that particular reading; the others will
// not.
void apply_inverse_range_sensor_model::operator()(int z)
{
   if (z < 0) // bad LRF reading
      return ;

   // Setup ray casting by converting from physical to grid coordinates
   // the robot's current position, which is the ray's starting point,
   // and the end point of the range measurement, the ray's end point.
   int x0, y0, x1, y1 ;
   Coords::to_grid(m_pose.x(), m_pose.y(), &x0, &y0) ;
   Coords::to_grid(m_pose.x() + z * cos(m_pose.theta() + m_angle),
                   m_pose.y() + z * sin(m_pose.theta() + m_angle),
                   &x1, &y1) ;
   /*
   LERROR("range:    [%08lX] {%4d} %4d", m_this, m_angle, z) ;
   LERROR("ray grid: [%08lX] {%4d} (%4d,%4d) (%4d,%4d)",
          m_this, m_angle, x0, y0, x1, y1) ;
   // */

   // Ray casting will be performed by the line rasterizer, which will
   // invoke the function call operator for the update_occupancy object.
   // That function will be passed the coordinates of each intersecting
   // cell as the ray progresses from its start point to its end point.
   // However, in order to properly update a cell, the line rasterizer
   // callback function will need to know the current range reading, the
   // distance between the current cell and the robot position, etc.
   //
   // Therefore, before triggering ray casting, we setup the parameters
   // the ray casting callback will need for its map update operation.
   m_range_reading = z ;
   m_ray_range     = 0 ;
   m_prev_x = x0 - sign0(x1 - x0) ;
   m_prev_y = y0 - sign0(y1 - y0) ;

   // Now, we're ready to cast the ray along the current reading's
   // direction starting at the robot's current position...
   rasterize_line(x0, y0, x1, y1, update_occupancy(*this)) ;

   // Setup for the next range reading...
   m_angle += m_step ;
}

// This is the callback function for the line rasterizer. It implements a
// variation of algorithm inverse_range_sensor_model shown in table 9.2,
// page 288 of the Thrun book. The basic idea here is to mark the cells
// near the end of the ray as occupied and all the cells in between as
// free space.
bool
apply_inverse_range_sensor_model::update_occupancy::
operator()(int x, int y)
{
   const int W = SlamParams::map_width()  ;
   const int H = SlamParams::map_height() ;
   if (x < 0 || x >= W || y < 0 || y >= H)
      return false ; // crossed map boundaries ==> quit ray casting
   /*
   float occ = outer.m_map.get(x, y) ;
   LERROR("current:  [%08lX] (%4d,%4d) p[%11.8f %10.8f] r = %10.2f",
          outer.m_this, x, y, occ, log_odds_to_prob(occ),
          outer.m_ray_range) ;
   // */

   // m_ray_range holds the distance, in the real/world coordinate
   // system, between the current cell (x,y) and the robot's position. If
   // the difference between this cell's distance and the range reading
   // falls within the configured error threshold (see
   // SlamParams::beam_occ_range_error()), then cell (x,y) must be near
   // the end of the ray. Therefore, we mark it as occupied. Otherwise,
   // the cell must be one of the intermediate ones; so we mark it as
   // free space.
   const float threshold = SlamParams::beam_occ_range_error() ;
   if (abs(outer.m_range_reading - outer.m_ray_range) < threshold) {
      outer.m_map.occupied(x, y) ;
      /*
      float occ = outer.m_map.get(x, y) ;
      LERROR("occupied: [%08lX] (%4d,%4d) p[%11.8f %10.8f] r = %10.2f",
             outer.m_this, x, y, occ, log_odds_to_prob(occ),
             outer.m_ray_range) ;
      // */
      //return false ; // reached an obstacle ==> mark it and quit ray casting
   }
   else if (outer.m_ray_range < outer.m_range_reading) { // double check
      outer.m_map.vacant(x, y) ;
      /*
      float occ = outer.m_map.get(x, y) ;
      LERROR("vacant:   [%08lX] (%4d,%4d) p[%11.8f %10.8f] r = %10.2f",
             outer.m_this, x, y, occ, log_odds_to_prob(occ),
             outer.m_ray_range) ;
      // */
   }

   // In the Thrun book, they compute r in the algorithm on page 288 for
   // all the cells that lie in the measurement's perceptual field. In
   // this implementation, we simply use m_ray_range as r and keep track
   // of it as we move along the ray.
   //
   // In each iteration, the line rasterizer will move one pixel from the
   // previous/current pixel to the next one. Since we know the size of a
   // cell in the occupancy grid, we can keep track of r by incrementing
   // m_ray_range by the cell size. The only catch is that for diagonal
   // moves, the increment amount will be sqrt(2) times the cell size
   // (Pythagoras).
   switch (abs(x - outer.m_prev_x) + abs(y - outer.m_prev_y))
   {
      case 0:  // ray has not moved (will happen in the first iteration)
         break ;
      case 1:  // ray has moved vertically or horizontally
         outer.m_ray_range += outer.m_ray_delta ;
         break ;
      case 2:  // ray has moved diagonally
         outer.m_ray_range += outer.m_ray_diagonal_delta ;
         break ;
      default: // line rasterizer moved more than one pixel in one step?
         throw misc_error(LOGIC_ERROR) ;
         break ;
   }

   // Setup for next cell in ray's path and continue ray casting...
   outer.m_prev_x = x ;
   outer.m_prev_y = y ;
   return true ;
}

} // end of local anonymous namespace encapsulating above helpers

// Bayesian state estimation consists of a prediction step, wherein we
// apply the control input to figure out a new state, and a correction
// step, wherein we use the latest sensor data to correct the state
// prediction made in the previous step.
//
// In the SLAM problem, the state we're trying to estimate is the robot's
// pose as well as the map of its surroundings. Thus, the correction
// step requires us to correct the latest pose predicted by the action
// model and also update the map using the corrected pose.
//
// This method implements the map correction step of the FastSLAM
// algorithm. It is a variation of the occupancy_grid_mapping procedure
// presented in table 9.1, page 286 of "Probabilistic Robotics" by Thrun,
// Burgard and Fox. Rather than looping over all the cells in the
// occupancy grid and applying the inverse sensor model to those cells
// that lie in the perceptual field of the measurement, we loop over the
// individual range readings making up the latest LRF scan and use ray
// casting to find the cells that lie in the perceptual fields of each of
// the readings and apply the inverse sensor model to those cells.
void Particle::update_map(const std::vector<int>& zt)
{
   std::for_each(zt.begin(), zt.end(),
                 apply_inverse_range_sensor_model(*m_map, m_pose, m_this)) ;
}
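
// NOTE: The per-cell updates above lean on OccGrid::occupied() and
// OccGrid::vacant(), which, judging by the log_odds_to_prob()
// conversions in the debugging statements, are expected to perform the
// standard additive log-odds update (the exact increments are an
// implementation detail of OccGrid, not shown here):
//
//    l(x,y) <-- l(x,y) + log(p/(1 - p))
//
// with p > 0.5 for an "occupied" observation and p < 0.5 for a "vacant"
// one. Working in log-odds keeps each update a cheap addition and lets
// evidence from successive scans accumulate regardless of the order in
// which the rays are cast.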

//----------------------- VISUALIZATION SUPPORT -------------------------

Particle::Viz::Viz(const Pose& p, float w)
   : pose(p), weight(w)
{}

//-----------------------------------------------------------------------

} // end of namespace encapsulating this file's definitions

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */