MotionOps.C

/*!@file Robots/Beobot2/Navigation/FOE_Navigation/MotionOps.C
   Various motion-related functions,
   for example Lucas & Kanade and Horn & Schunck optical flow.         */
// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Christian Siagian <siagian@caltech.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/Beobot2/Navigation/FOE_Navigation/MotionOps.C $
// $Id: $

#ifndef MOTIONOPS_C_DEFINED
#define MOTIONOPS_C_DEFINED

#define MAX_NUM_FEATURES   1000

// OpenCV must be first to avoid conflicting defs of int64, uint64
#include "Image/OpenCVUtil.H"

#include "Image/CutPaste.H"
#include "Image/ColorOps.H"
#include "Image/MathOps.H"
#include "Image/DrawOps.H"

#include <cstdio>

#include "Robots/Beobot2/Navigation/FOE_Navigation/MotionOps.H"

// allocate the image only if it has not already been allocated
inline static void allocateOnDemand
( IplImage **img, CvSize size, int depth, int channels)
{
  if ( *img != NULL ) return;
  *img = cvCreateImage( size, depth, channels );
}

// ######################################################################
rutz::shared_ptr<OpticalFlow> getCleanOpticFlow(Image<byte> img)
{
  uint width  = img.getWidth();
  uint height = img.getHeight();

  // translation
  float ratio = 133.33*1000.0;

  float Tx =  0.0 * ratio;
  float Ty =  0.0 * ratio;
  float Tz =   .5 * ratio;

  // rotation
  float Ox = 0.0;
  float Oy = 0.0;
  float Oz = 0.0;

  // focal length
  float f = 400.0;

  // depth ranges from startDepth to startDepth + rangeDepth
  // (here 11 to 211, in ratio units)
  float startDepth =  11.0*ratio;
  float rangeDepth = 200.0*ratio;

  std::vector<Point2D<float> > startPt(MAX_NUM_FEATURES);
  std::vector<Point2D<float> >   endPt(MAX_NUM_FEATURES);

  float w2 = width/2;
  float h2 = height/2;

  // pick MAX_NUM_FEATURES (1000) random points, each with a random depth
  for(uint i = 0; i < MAX_NUM_FEATURES; i++)
    {
      // use centered image coordinates:
      // origin at (w/2, h/2) of the image
      float x =   float(width  * float(rand())/(RAND_MAX + 1.0F)) - w2;
      float y =   float(height * float(rand())/(RAND_MAX + 1.0F)) - h2;

      float z = rangeDepth * float(rand())/(RAND_MAX + 1.0F) + startDepth;
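
      // the displacement (dx,dy) below follows the standard perspective
      // motion-field equations (cf. Longuet-Higgins & Prazdny, 1980),
      // with the sign conventions used in this file: a point at depth z,
      // imaged at (x,y) with focal length f, under camera translation
      // (Tx,Ty,Tz) and rotation (Ox,Oy,Oz), moves by
      //   dx = (1/z)(-f*Tx + x*Tz) + (x*y/f)*Ox - (f + x*x/f)*Oy + y*Oz
      //   dy = (1/z)(-f*Ty + y*Tz) + (f + y*y/f)*Ox - (x*y/f)*Oy + x*Oz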
      float dx = 1.0/z*(-f*Tx + x*Tz) + ( x*y/f   *Ox - (f+x*x/f)*Oy + y*Oz);
      float dy = 1.0/z*(-f*Ty + y*Tz) + ((f+y*y/f)*Ox - (x*y/f)  *Oy + x*Oz);

      startPt[i] = Point2D<float>((x+w2)   , (y+h2)   );
      endPt  [i] = Point2D<float>((x+w2)+dx, (y+h2)+dy);

      //LINFO("d[%f %f]", dx, dy);
      //Raster::waitForKey();
    }

  std::vector<rutz::shared_ptr<FlowVector> > fv; fv.clear();
  for(int i = 0; i < MAX_NUM_FEATURES; i++)
    {
      rutz::shared_ptr<FlowVector> tflow
        (new FlowVector(startPt[i], endPt[i], 1.0));
      fv.push_back(tflow);

      LDEBUG("[%3d] flow: 1[%13.4f %13.4f] 2[%13.4f %13.4f] "
             "ang: %13.4f mag: %13.4f val: %13.4f",
             i, tflow->p1.i, tflow->p1.j, tflow->p2.i, tflow->p2.j,
             tflow->angle, tflow->mag, tflow->val);
    }

  // create the optical flow
  rutz::shared_ptr<OpticalFlow> oflow(new OpticalFlow(fv, img.getDims()));

  return oflow;
}
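
// A minimal usage sketch (hypothetical driver code, not part of this
// file): the synthetic flow can be overlaid on the input frame with
// drawOpticFlow() below. toRGB() (from Image/ColorOps.H) is assumed
// here to lift the grayscale frame to RGB for drawing:
//
//   Image<byte> frame = Raster::ReadGray("frame.pgm");
//   rutz::shared_ptr<OpticalFlow> flow = getCleanOpticFlow(frame);
//   Image<PixRGB<byte> > viz = drawOpticFlow(toRGB(frame), flow);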

// ######################################################################
void saveCleanOpticFlow(Image<byte> img)
{
  uint width  = img.getWidth();
  uint height = img.getHeight();

  // translation
  float ratio = 133.33*1000.0;

  float Tx =  0.0 * ratio;
  float Ty =  0.0 * ratio;
  float Tz =   .5 * ratio;

  // rotation
  float Ox = 0.0;
  float Oy = 0.0;
  float Oz = 0.0;

  // focal length
  float f = 400.0;

  // depth ranges from startDepth to startDepth + rangeDepth
  // (here 11 to 211, in ratio units)
  float startDepth =  11.0*ratio;
  float rangeDepth = 200.0*ratio;

  std::vector<Point2D<float> > startPt(MAX_NUM_FEATURES);
  std::vector<Point2D<float> >   endPt(MAX_NUM_FEATURES);

  float w2 = width/2;
  float h2 = height/2;

  std::string resFName("results.txt");
  FILE *rFile = fopen(resFName.c_str(), "at");
  if (rFile == NULL) LFATAL("cannot append to %s", resFName.c_str());

  // pick MAX_NUM_FEATURES (1000) random points, each with a random depth
  for(uint i = 0; i < MAX_NUM_FEATURES; i++)
    {
      // use centered image coordinates:
      // origin at (w/2, h/2) of the image
      float x =   float(width  * float(rand())/(RAND_MAX + 1.0F)) - w2;
      float y =   float(height * float(rand())/(RAND_MAX + 1.0F)) - h2;

      float z = rangeDepth * float(rand())/(RAND_MAX + 1.0F) + startDepth;

      // same motion-field model as in getCleanOpticFlow() above
      float dx = 1.0/z*(-f*Tx + x*Tz) + ( x*y/f   *Ox - (f+x*x/f)*Oy + y*Oz);
      float dy = 1.0/z*(-f*Ty + y*Tz) + ((f+y*y/f)*Ox - (x*y/f)  *Oy + x*Oz);

      //startPt[i] = Point2D<float>((x+w2)   , (y+h2)   );
      //endPt  [i] = Point2D<float>((x+w2)+dx, (y+h2)+dy);

      float ox = x+w2;
      float oy = y+h2;
      LINFO("%f %f", ox,      oy);
      LINFO("%f %f", ox+dx/2, oy+dy/2);
      LINFO("%f %f", ox+dx,   oy+dy);

      LDEBUG("saving result to %s", resFName.c_str());
      std::string line1 = sformat("%f %f \n", ox,      oy);
      std::string line2 = sformat("%f %f \n", ox+dx/2, oy+dy/2);
      std::string line3 = sformat("%f %f \n", ox+dx,   oy+dy);

      fputs(line1.c_str(), rFile);
      fputs(line2.c_str(), rFile);
      fputs(line3.c_str(), rFile);
    }

  fclose(rFile);

  Raster::waitForKey();
}
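
// results.txt holds three "x y" lines per feature: the start point,
// the midpoint, and the end point of the flow vector. A minimal
// (hypothetical) sketch for reading the file back:
//
//   FILE *f = fopen("results.txt", "r");
//   if (f == NULL) LFATAL("cannot open results.txt");
//   float x0,y0, xm,ym, x1,y1;
//   while (fscanf(f, "%f %f %f %f %f %f",
//                 &x0, &y0, &xm, &ym, &x1, &y1) == 6)
//     LINFO("flow: (%f,%f) -> (%f,%f)", x0, y0, x1, y1);
//   fclose(f);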

// ######################################################################
rutz::shared_ptr<OpticalFlow> getLucasKanadeOpticFlow
(Image<byte> image1, Image<byte> image2)
{
  IplImage* frame1_1C = img2ipl(image1);
  IplImage* frame2_1C = img2ipl(image2);

  CvSize frame_size;
  frame_size.width  = image1.getWidth();
  frame_size.height = image1.getHeight();

  static IplImage
    *eig_image = NULL, *temp_image = NULL,
    *pyramid1 = NULL, *pyramid2 = NULL;

  // Shi and Tomasi feature detection

  // preparation: allocate the necessary storage
  allocateOnDemand( &eig_image,  frame_size, IPL_DEPTH_32F, 1 );
  allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

  // preparation: this array will contain the features found in frame 1
  CvPoint2D32f frame1_features[MAX_NUM_FEATURES];
  int number_of_features = MAX_NUM_FEATURES;

  // run the Shi and Tomasi corner detector:
  //   "frame1_1C" is the input image;
  //   "eig_image" and "temp_image" are just workspace for the algorithm;
  //   the first ".01" specifies the minimum quality of the features
  //     (based on the eigenvalues);
  //   the second ".01" specifies the minimum Euclidean distance between
  //     features;
  //   "NULL" means use the entire input image (a mask could restrict the
  //     search to part of the image).
  //   When the function returns, "frame1_features" contains the feature
  //     points, and "number_of_features" is set to a value
  //     <= MAX_NUM_FEATURES indicating how many feature points were found.
  cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features,
                        &number_of_features, .01, .01, NULL);

  // Pyramidal Lucas-Kanade optical flow

  CvPoint2D32f frame2_features[MAX_NUM_FEATURES];
  char optical_flow_found_feature[MAX_NUM_FEATURES];
  float optical_flow_feature_error[MAX_NUM_FEATURES];

  // window size used to avoid the aperture problem
  CvSize optical_flow_window = cvSize(3,3);

  // termination criteria:
  // stop after 20 iterations or once epsilon is better than .3;
  // play with these parameters for speed vs. accuracy
  CvTermCriteria optical_flow_termination_criteria =
    cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

  // workspace for the algorithm, which carves the images into
  // pyramids of different resolutions
  allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
  allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

  LINFO("number of features: %d", number_of_features);

  // run pyramidal Lucas-Kanade optical flow:
  //   "5" is the maximum number of pyramid levels to use (0: just one level);
  //   the last "0" means disable enhancements
  //   (for example, the second array isn't preinitialized with guesses)
  cvCalcOpticalFlowPyrLK
    (frame1_1C, frame2_1C, pyramid1, pyramid2,
     frame1_features, frame2_features, number_of_features,
     optical_flow_window, 5, optical_flow_found_feature,
     optical_flow_feature_error, optical_flow_termination_criteria, 0 );

  std::vector<rutz::shared_ptr<FlowVector> > fv; fv.clear();
  for(int i = 0; i < number_of_features; i++)
    {
      // if pyramidal Lucas-Kanade didn't really find the feature, skip it
      if ( optical_flow_found_feature[i] == 0 ) continue;

      rutz::shared_ptr<FlowVector> tflow
        (new FlowVector
         (Point2D<float>((float)frame1_features[i].x,
                         (float)frame1_features[i].y),
          Point2D<float>((float)frame2_features[i].x,
                         (float)frame2_features[i].y),
          optical_flow_found_feature[i]));

      fv.push_back(tflow);

      LDEBUG("[%3d] flow: 1[%13.4f %13.4f] 2[%13.4f %13.4f] "
             "ang: %13.4f mag: %13.4f val: %13.4f",
             i, tflow->p1.i, tflow->p1.j, tflow->p2.i, tflow->p2.j,
             tflow->angle, tflow->mag, tflow->val);
    }

  // create the optical flow
  rutz::shared_ptr<OpticalFlow> oflow
    (new OpticalFlow(fv, image1.getDims()));

  return oflow;
}
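
// A minimal usage sketch (hypothetical driver code, not part of this
// file): compute Lucas-Kanade flow between two consecutive grayscale
// frames and visualize it. The frame file names and toRGB() (from
// Image/ColorOps.H) are assumptions:
//
//   Image<byte> prev = Raster::ReadGray("frame000.pgm");
//   Image<byte> curr = Raster::ReadGray("frame001.pgm");
//   rutz::shared_ptr<OpticalFlow> flow = getLucasKanadeOpticFlow(prev, curr);
//   Image<PixRGB<byte> > viz = drawOpticFlow(toRGB(curr), flow);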

// ######################################################################
Image<PixRGB<byte> > drawOpticFlow
(Image<PixRGB<byte> > img, rutz::shared_ptr<OpticalFlow> oflow)
{
  std::vector<rutz::shared_ptr<FlowVector> > flow =
    oflow->getFlowVectors();

  // draw the flow field
  for(uint i = 0; i < flow.size(); i++)
    {
      Point2D<int> pt1((int)flow[i]->p1.i, (int)flow[i]->p1.j);
      Point2D<int> pt2((int)flow[i]->p2.i, (int)flow[i]->p2.j);
      drawLine(img, pt1, pt2, PixRGB<byte>(255,0,0), 1);

      // // for a nicer visualization, lengthen the vector by a factor of 3:
      // CvPoint p,q;
      // p.x = (int) frame1_features[i].x;
      // p.y = (int) frame1_features[i].y;
      // q.x = (int) frame2_features[i].x;
      // q.y = (int) frame2_features[i].y;

      // double angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
      // double hypotenuse = sqrt( pow(p.y - q.y, 2.0) + pow(p.x - q.x, 2.0) );
      // float factor = 3.0;
      // q.x = (int) (p.x - factor * hypotenuse * cos(angle));
      // q.y = (int) (p.y - factor * hypotenuse * sin(angle));

      // // "CV_AA" means antialiased drawing.
      // // "0" means no fractional bits in the center coordinate or radius.
      // cvLine( retFrame, p, q, line_color, line_thickness, CV_AA, 0 );

      // // now draw the tips of the arrow
      // p.x = (int) (q.x + 9 * cos(angle + M_PI / 4));
      // p.y = (int) (q.y + 9 * sin(angle + M_PI / 4));
      // cvLine( retFrame, p, q, line_color, line_thickness, CV_AA, 0 );
      // p.x = (int) (q.x + 9 * cos(angle - M_PI / 4));
      // p.y = (int) (q.y + 9 * sin(angle - M_PI / 4));
      // cvLine( retFrame, p, q, line_color, line_thickness, CV_AA, 0 );
    }

  return img;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */

#endif // !MOTIONOPS_C_DEFINED