/*!@file SeaBee/PipeRecognizer.C finds pipelines in an image */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Michael Montalbo <montalbo@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/SeaBee/PipeRecognizer.C $
// $Id: PipeRecognizer.C 12962 2010-03-06 02:13:53Z irock $

#ifdef HAVE_OPENCV

#include "GUI/XWinManaged.H"
#include "Image/Image.H"
#include "Image/Pixels.H"
#include "Util/Types.H"
#include "Util/log.H"
#include "Image/DrawOps.H"

#include "MBARI/Geometry2D.H"
#include "Image/OpenCVUtil.H"
#include "Image/ColorOps.H"

#include "SeaBee/VisionRecognizer.H"
#include "SeaBee/PipeRecognizer.H"

// ######################################################################
PipeRecognizer::PipeRecognizer()
{ }

// ######################################################################
PipeRecognizer::~PipeRecognizer()
{ }

// ######################################################################
std::vector<LineSegment2D> PipeRecognizer::getPipeLocation
(rutz::shared_ptr<Image<PixRGB<byte> > > colorSegmentedImage,
 rutz::shared_ptr<Image<PixRGB<byte> > > outputImage,
 PipeRecognizeMethod method)
{
  if (!colorSegmentedImage->initialized())
    return std::vector<LineSegment2D>();

  // The recognizers below operate on a single-channel image.
  Image<byte> lum = luminance(*colorSegmentedImage);

  switch (method)
    {
    case HOUGH:
      return calculateHoughTransform(lum, outputImage);

      // case LINE_BEST_FIT:
      //   return calculateLineBestFit(colorSegmentedImage, outputImage,
      //                               pipeCenter, pipeAngle);

      // case CONTOUR:
      //   return calculateContours(colorSegmentedImage, outputImage,
      //                            pipeCenter, pipeAngle);

    default:
      LERROR("Invalid pipe recognizer method specified");
      return std::vector<LineSegment2D>();
    }
}
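// ######################################################################
// A minimal usage sketch, kept as a comment (not part of the build):
// how a caller might invoke getPipeLocation(). The PipeRecognizer class
// and the HOUGH enumerator come from this file; the image dimensions
// and the steering comment are illustrative assumptions only.
//
//   rutz::shared_ptr<Image<PixRGB<byte> > >
//     segImg(new Image<PixRGB<byte> >(320, 240, ZEROS)); // color-segmented input
//   rutz::shared_ptr<Image<PixRGB<byte> > >
//     outImg(new Image<PixRGB<byte> >(320, 240, ZEROS)); // debug overlay target
//
//   PipeRecognizer rec;
//   std::vector<LineSegment2D> pipes =
//     rec.getPipeLocation(segImg, outImg, HOUGH);
//
//   if (!pipes.empty())
//     { /* e.g. steer the sub toward the orientation of pipes[0] */ }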
// ######################################################################
std::vector<LineSegment2D> PipeRecognizer::calculateHoughTransform
(Image<byte>& colorSegmentedImage,
 rutz::shared_ptr<Image<PixRGB<byte> > > outputImage)
{
#ifndef HAVE_OPENCV
  LFATAL("OpenCV must be installed in order to use this function");
#else
  // Run Canny edge detection on the image.
  IplImage cannyImage = getCannyImage(colorSegmentedImage);

  // Clear the output image and set it equal to the Canny image.
  // outputImage->clear();
  // rutz::shared_ptr<Image<PixRGB<byte> > >
  //   temp(new Image<PixRGB<byte> >(toRGB(ipl2gray(&cannyImage))));
  // NOTE: cannot convert directly to RGB, since cannyImage has only
  // one channel (black and white).
  // temp.resize(outputImage->getDims());
  // *outputImage += temp;

  // Run the Hough transform on the edge image.
  std::vector<LineSegment2D> lineSegments = getHoughLines(cannyImage);

  // Loop through the Hough lines and draw them into the output image.
  for (uint i = 0; i < lineSegments.size(); i++)
    {
      Point2D<int> pt1 = lineSegments[i].point1();
      Point2D<int> pt2 = lineSegments[i].point2();

      // Draw the line segment in the output image.
      drawLine(*outputImage, pt1, pt2, PixRGB<byte>(255, 0, 0));
    }

  // Merge near-duplicate lines before returning them.
  std::vector<LineSegment2D> prunedHoughLines = pruneHoughLines(lineSegments);

  return prunedHoughLines;
#endif // HAVE_OPENCV
}
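// ######################################################################
// For reference, a sketch of what getHoughLines() (inherited from
// VisionRecognizer) plausibly does with the OpenCV C API. The real
// implementation lives in VisionRecognizer.C; the parameter values
// below are illustrative assumptions, not the tuned SeaBee values.
//
//   CvMemStorage* storage = cvCreateMemStorage(0);
//   CvSeq* lines = cvHoughLines2(&cannyImage, storage,
//                                CV_HOUGH_PROBABILISTIC,
//                                1,            // rho resolution (pixels)
//                                CV_PI / 180,  // theta resolution (radians)
//                                50,           // accumulator threshold
//                                30,           // minimum segment length
//                                10);          // maximum gap within a segment
//
//   std::vector<LineSegment2D> segs;
//   for (int i = 0; i < lines->total; ++i)
//     {
//       CvPoint* pts = (CvPoint*) cvGetSeqElem(lines, i);
//       segs.push_back(LineSegment2D(Point2D<int>(pts[0].x, pts[0].y),
//                                    Point2D<int>(pts[1].x, pts[1].y)));
//     }
//   cvReleaseMemStorage(&storage);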
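// ######################################################################
// Likewise, a sketch of the pruning idea behind pruneHoughLines(): keep
// a segment only if no already-kept segment has nearly the same
// orientation and a nearly coincident endpoint. This assumes
// LineSegment2D exposes an angle() accessor; the thresholds (0.1 rad,
// 10 pixels) are illustrative assumptions, not the real values.
//
//   std::vector<LineSegment2D> kept;
//   for (uint i = 0; i < lineSegments.size(); i++)
//     {
//       bool duplicate = false;
//       for (uint j = 0; j < kept.size(); j++)
//         {
//           double dx = lineSegments[i].point1().i - kept[j].point1().i;
//           double dy = lineSegments[i].point1().j - kept[j].point1().j;
//           if (fabs(lineSegments[i].angle() - kept[j].angle()) < 0.1 &&
//               sqrt(dx*dx + dy*dy) < 10.0)
//             { duplicate = true; break; }
//         }
//       if (!duplicate) kept.push_back(lineSegments[i]);
//     }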
// ######################################################################
// Placeholder implementations: these alternate recognizers have not
// been written yet.

uint PipeRecognizer::calculateLineBestFit
(Image<byte> &colorSegmentedImage,
 Image<PixRGB<byte> > &outputImage,
 Point2D<int> &pipeCenter,
 double &pipeAngle)
{ return 0; }

uint PipeRecognizer::calculateContours
(Image<byte> &colorSegmentedImage,
 Image<PixRGB<byte> > &outputImage,
 Point2D<int> &pipeCenter,
 double &pipeAngle)
{ return 0; }

// double PipeRecognizer::getOrangePixels(Image<byte> &cameraImage,
//                                        double &avgX,
//                                        double &avgY,
//                                        double &sumX,
//                                        double &sumY)
// {
//   Timer tim(1000000);

//   std::vector<Point2D<int> > edgePoints;
//   uint w = cameraImage.getWidth();
//   uint h = cameraImage.getHeight();

//   Image<byte> colorSegmentedImage(w, h, ZEROS);

//   colorSegmentedImage = cameraImage;

//   avgX = 0.0;
//   avgY = 0.0;
//   sumX = 0.0;
//   sumY = 0.0;

//   // Isolate the orange pixels in the image.
//   tim.reset();

//   // isolateOrange(cameraImage, orangeIsoImage);

//   // Find all the white edge pixels in the image and store them.
//   for (int y = 0; y < orangeIsoImage.getHeight(); y++)
//     {
//       for (int x = 0; x < orangeIsoImage.getWidth(); x++)
//         {
//           if (orangeIsoImage.getVal(x,y) == 255)
//             {
//               // Convert the x,y position of the pixel to an x,y position
//               // where the center of the image is the origin, as opposed
//               // to the top-left corner, and store the pixel.
//               edgePoints.push_back(Point2D<int>(x, y));

//               sumX += x;
//               sumY += y;
//             }
//         }
//     }

//   avgX = sumX / edgePoints.size();
//   avgY = sumY / edgePoints.size();

//   return getSlope(orangeIsoImage, edgePoints, avgX, avgY, sumX, sumY);
// }

// double PipeRecognizer::getSlope(Image<byte> &cameraImage,
//                                 std::vector<Point2D<int> > &points,
//                                 double avgX,
//                                 double avgY,
//                                 double sumX,
//                                 double sumY)
// {
//   double top = 0.0;
//   double bottom = 0.0;
//   double top2 = 0.0;
//   double bottom2 = 0.0;
//   double return_value = 0.0;
//   double return_value2 = 0.0;

//   int x = 0;
//   int y = 0;

//   // Loop through all the points in the picture and generate a slope
//   // by finding the line of best fit.
//   for (uint i = 0; i < points.size(); i++)
//     {
//       x = points[i].i;
//       y = points[i].j;

//       top    += (x - avgX) * (y - avgY);
//       bottom += (x - avgX) * (x - avgX);

//       // Repeat with the axes rotated by 90 degrees about the image
//       // center, to handle near-vertical pipes.
//       int tx = x - cameraImage.getWidth() / 2;
//       int ty = y - cameraImage.getHeight() / 2;
//       x = ty + cameraImage.getHeight() / 2;
//       y = -tx + cameraImage.getWidth() / 2;

//       top2    += (x - avgX) * (y - avgY);
//       bottom2 += (x - avgX) * (x - avgX);
//     }

//   if (bottom != 0.0)
//     return_value = atan2(top, bottom);
//   else
//     return_value = 1.62; // If the bottom is zero we have a vertical line,
//                          // so we want to return (roughly) pi/2.

//   if (bottom2 != 0.0)
//     return_value2 = atan2(top2, bottom2) + 3.14159 / 2;
//   else
//     return_value2 = 1.62 + 3.14159 / 2;

//   // Pick whichever of the two fits has the smaller squared error.
//   double e1 = 0.0;
//   double e2 = 0.0;
//   for (uint i = 0; i < points.size(); i++)
//     {
//       x = points[i].i;
//       y = points[i].j;

//       e1 += pow(x / bottom * top + avgY - y, 2);

//       int tx = x - cameraImage.getWidth() / 2;
//       int ty = y - cameraImage.getHeight() / 2;
//       x = ty + cameraImage.getHeight() / 2;
//       y = -tx + cameraImage.getWidth() / 2;

//       e2 += pow(x / bottom2 * top2 + avgY - y, 2);
//     }

//   if (e1 < e2)
//     return return_value;
//   return return_value2;
// }

#endif // HAVE_OPENCV

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */