/*!@file BeoSub/Stereo.C */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file:
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/BeoSub/Stereo.C $
// $Id: Stereo.C 14376 2011-01-11 02:44:34Z pez $
//

#include "BeoSub/CannyEdge.H"
#include "BeoSub/HoughTransform.H"
#include "Component/ModelManager.H"
#include "Devices/FrameGrabberFactory.H"
#include "GUI/XWindow.H"
#include "Image/ColorOps.H"
#include "Image/DrawOps.H"
#include "Image/FilterOps.H"
#include "Image/Image.H"
#include "Image/MorphOps.H"     // for closeImg()
#include "Image/Pixels.H"
#include "Image/Transforms.H"
#include "MBARI/Geometry2D.H"
#include "Raster/Raster.H"
#include "SIFT/VisualObject.H"
#include "SIFT/VisualObjectMatch.H"
#include "SIFT/VisualObjectMatchAlgo.H"
#include "Transport/FrameIstream.H"
#include "Util/Timer.H"
#include "Util/Types.H"
#include "Util/log.H"
#include "rutz/shared_ptr.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream> // needed for segmentImageTrackMC!
#include <math.h>
#include <sstream>
#include <string>
#include <vector>

#define YTHRESH 20
////////////////////////////////////////////////////
// Stereo Vision
// Randolph Voorhies & Andre Rosa
// 01/15/06
///////////////////////////////////////////////////

using namespace std;


// One candidate scanline for the pixel-sweep disparity computation: the
// intensity seen at the left border of the image, the target intensity we
// sweep for (the object edge), the x position of that edge in each camera
// image, and the resulting disparity.
struct SweepPoint {
  unsigned int currentIntensity, targetIntensity;
  int xpos, xpos2, ypos;
  float disparity;
};

// Convert an integer to its decimal string representation.
string IntToString(int num)
{
  ostringstream myStream;
  myStream << num << flush; // output the number and flush the buffer
  return myStream.str();    // the string form of the stringstream object
}

// Mark a matched point with a red cross.
void drawMark(Image< PixRGB<byte> > &cameraImage, int x, int y, int size, float dist)
{
  drawCross(cameraImage, Point2D<int>(x,y), PixRGB<byte>(255,0,0), size/2);

  //writeText(cameraImage, Point2D<int>(x,y), (const char*)IntToString(dist).c_str());
}


// Binarize the image: a pixel is kept (set to 255) when red is its dominant
// channel and the standard deviation across the RGB channels is large,
// i.e. the pixel is strongly colored rather than gray. Returns the
// percentage of pixels that were rejected (set to 0).
int isolateRed(const Image< PixRGB<byte> > &inputImage, Image< PixRGB<byte> > &outputImage)
{
  int blackCount = 0;
  for (int j = 0; j < inputImage.getHeight(); j++) {
    for (int i = 0; i < inputImage.getWidth(); i++) {

      /* float h1 = inputImage.getVal(i, j).H1();
         float h2 = inputImage.getVal(i, j).H2();
         float s  = inputImage.getVal(i, j).S();
         float v  = inputImage.getVal(i, j).V(); */

      float avgR = inputImage.getVal(i, j).red();
      float avgG = inputImage.getVal(i, j).green();
      float avgB = inputImage.getVal(i, j).blue();

      // standard deviation of the three color channels
      float avg = (avgR + avgG + avgB) / 3.0;
      float sigma = pow((double)(avgR - avg), (double)2.) +
                    pow((double)(avgG - avg), (double)2.) +
                    pow((double)(avgB - avg), (double)2.);
      float stdDev = sqrt((1./3.) * sigma);
      if (avgR > avgG && avgR > avgB && stdDev > 25 && outputImage.coordsOk(i,j)) {
        outputImage.setVal(i, j, 255);
      }
      else {
        if (outputImage.coordsOk(i,j)) {
          outputImage.setVal(i, j, 0);
          blackCount++;
        }
      }

    }
  }

  return (blackCount*100) / (inputImage.getHeight() * inputImage.getWidth());
}


int main(int argc, char* argv[])
{
  Image< PixRGB<byte> > cameraImage;
  Image< PixRGB<byte> > cameraImage2;
  Image< PixRGB<byte> > fusedImg;

  std::vector<KeypointMatch> matchVec;
  matchVec.clear();

  float xMatchDist = 0;

  XWindow* xwin;
  XWindow* xwin2;

  ModelManager camManager("Stereo Tester");
  nub::ref<FrameIstream> gb(makeIEEE1394grabber(camManager, "left", "leftcam"));
  nub::ref<FrameIstream> gb2(makeIEEE1394grabber(camManager, "right", "rightcam"));

  camManager.addSubComponent(gb);
  camManager.addSubComponent(gb2);

  camManager.loadConfig("camconfig.pmap");
  // play with this later: camManager.loadConfig("camconfig.pmap");
  gb->setModelParamVal("FrameGrabberSubChan", 0);
  gb->setModelParamVal("FrameGrabberBrightness", 128);
  gb->setModelParamVal("FrameGrabberHue", 180);
  gb->setModelParamVal("FrameGrabberFPS", 15.0F);

  gb2->setModelParamVal("FrameGrabberSubChan", 1);
  gb2->setModelParamVal("FrameGrabberBrightness", 128);
  gb2->setModelParamVal("FrameGrabberHue", 180);
  gb2->setModelParamVal("FrameGrabberFPS", 15.0F);

  camManager.start();


  cameraImage = gb->readRGB();
  cameraImage2 = gb2->readRGB();

  Image<byte> depthMap(cameraImage2.getDims(), ZEROS);

  xwin = new XWindow(cameraImage2.getDims(), 0, 0, "left");
  xwin2 = new XWindow(cameraImage.getDims(), cameraImage.getWidth() + 10, 0, "right");
  // these three windows are only used by the SIFT-based code that is
  // currently commented out below
  XWindow* xwin3 = new XWindow(Dims(320,480), 0, cameraImage.getHeight() + 30, "together at last");
  XWindow* xwin4 = new XWindow(cameraImage2.getDims(), cameraImage.getWidth() + 10, cameraImage.getHeight() + 30, "one love");
  XWindow* xwin5 = new XWindow(depthMap.getDims(), cameraImage.getWidth() + 10, cameraImage.getHeight()*2 + 60, "output");

  rutz::shared_ptr<VisualObject>
    voLeft(new VisualObject("left", "leftfilename", cameraImage2,
                            Point2D<int>(-1,-1), std::vector<float>(),
                            std::vector< rutz::shared_ptr<Keypoint> >(), true));
  rutz::shared_ptr<VisualObject>
    voRight(new VisualObject("right", "rightfilename", cameraImage,
                             Point2D<int>(-1,-1), std::vector<float>(),
                             std::vector< rutz::shared_ptr<Keypoint> >(), true));
  rutz::shared_ptr<VisualObjectMatch> matches(new VisualObjectMatch(voLeft, voRight, VOMA_SIMPLE, (uint)10));

  /*
  while(1) {
    cameraImage = gb->readRGB();
    cameraImage2 = gb2->readRGB();

    xwin->drawImage(cameraImage);
    xwin2->drawImage(cameraImage2);

    ///////////////////////////////////////////////////////////////////////
    voLeft.reset(new VisualObject("left", "leftfilename", cameraImage2));
    voRight.reset(new VisualObject("right", "rightfilename", cameraImage));
    matches.reset(new VisualObjectMatch(voLeft, voRight, VOMA_SIMPLE, (uint)20));


    cout << "cameraImage width: " << cameraImage.getWidth() << endl
         << "cameraImage Height: " << cameraImage.getHeight() << endl;


    matches->prune();

    float min = 15, max = 0;
    if(matches->checkSIFTaffine()) {

      fusedImg = matches->getMatchImage(1.0F);

      xwin3->drawImage(fusedImg);
      xwin4->drawImage(matches->getFusedImage());

      matchVec = matches->getKeypointMatches();

      ////////////////////////////////////////////////////////

      for(int i = 0; i < (int)matchVec.size(); i++) {
        cout << "x1:" << matchVec[i].refkp->getX() << " y1: " << matchVec[i].refkp->getY();
        cout << " x2:" << matchVec[i].tstkp->getX() << " y2: " << matchVec[i].tstkp->getY() << std::endl;

        xMatchDist = matchVec[i].refkp->getX() - matchVec[i].tstkp->getX();
        xMatchDist = fabs(xMatchDist);
        // baseline = 3 3/4 in = 0.3125 ft
        // focal length = 419.5509189 pixel units
        // depth = baseline * focal / (xl - xr)
        float depth = (0.3125 * 419.5509189) / xMatchDist;
        // an empirical model of how the depth error grows with depth
        cout << "depth: " << depth << endl;
        float error = 0.1 * exp(0.275 * depth);

        cout << "error: " << error << endl;
        if(error <= 4)
          depth -= error;

        if(depth > 15 || depth < 0)
          depth = 15;

        int pixelDepth = (int)(255 - depth * 13);

        if(depth > max && depth < 15)
          max = depth;

        if(depth < min)
          min = depth;

        cout << "new depth: " << depth << endl;
        cout << "pixel depth: " << pixelDepth << endl;

        // paint an 11x11 patch of the depth map around the keypoint
        for(int j = -5; j <= 5; j++)
          for(int k = -5; k <= 5; k++)
            depthMap.setVal((int)matchVec[i].refkp->getX() + j, (int)matchVec[i].refkp->getY() + k, (int)(255 - depth * 13));

        drawMark(cameraImage2, (int)matchVec[i].refkp->getX(), (int)matchVec[i].refkp->getY(), (int)xMatchDist, (255 - depth * 13));
      }
    }

    cout << "max: " << max << " min: " << min << endl;
    xwin5->drawImage(depthMap);
    depthMap.clear();

    Raster::waitForKey();
  }
  */

  ////////////////////////////// disparity by color isolation ///////////////////////////////
  while(1) {
    cameraImage = gb->readRGB();
    cameraImage2 = gb2->readRGB();

    // Isolate reddish pixels in the left image
    isolateRed(cameraImage, cameraImage);

    // Fill in any small holes in the image
    Image<byte> se(5, 5, ZEROS); se += 255;
    Image<byte> closed = closeImg(luminance(cameraImage), se);

    // Find the largest white area
    Image<byte> mask(closed.getDims(), ZEROS);
    Image<byte> camlargest; int largest_n = 0;
    Image<byte> msk(closed.getDims(), ZEROS);
    for (int j = 0; j < closed.getHeight(); j++) {
      for (int i = 0; i < closed.getWidth(); i++) {
        if (closed.getVal(i, j) == 255 && mask.getVal(i, j) == 0)
        {
          //LINFO("Flooding (%d, %d)...", i, j);
          msk.clear();
          int n = floodClean(closed, msk, Point2D<int>(i, j), byte(128), byte(255), 4);
          if (n > largest_n) { largest_n = n; camlargest = msk; }
          mask += msk;
        }
      }
    }


    Image<byte> lineImage(camlargest.getDims(), ZEROS);


    /* int xcount, ycount;
    xcount = ycount = 0;
    int xavg, yavg;
    xavg = yavg = 0;
    int pixcount = 0;
    for(int j = 0; j < camlargest.getHeight(); j++) {
      for(int i = 0; i < camlargest.getWidth(); i++) {
        if(camlargest.getVal(i, j) == 255) {
          pixcount++;
          ycount += j;
          xcount += i;
        }
      }
    }

    xavg = xcount / pixcount;
    yavg = ycount / pixcount; */
    //drawCross(cameraImage, Point2D<int>(xavg, yavg), PixRGB<byte>(0, 255, 0), 10);

    //xwin->drawImage(cameraImage);

    //////////////////////////// isolate color ///////////////////////////////////////
    // Isolate reddish pixels in the right image
    isolateRed(cameraImage2, cameraImage2);

    // Fill in any small holes in the image
    Image<byte> cam2largest;
    Image<byte> si(5, 5, ZEROS); si += 255;
    closed = closeImg(luminance(cameraImage2), si);

    // Find the largest white area
    mask = Image<byte>(closed.getDims(), ZEROS);
    largest_n = 0;
    msk = Image<byte>(closed.getDims(), ZEROS);
    for (int j = 0; j < closed.getHeight(); j++) {
      for (int i = 0; i < closed.getWidth(); i++) {
        if (closed.getVal(i, j) == 255 && mask.getVal(i, j) == 0)
        {
          //LINFO("Flooding (%d, %d)...", i, j);
          msk.clear();
          int n = floodClean(closed, msk, Point2D<int>(i, j), byte(128), byte(255), 4);
          if (n > largest_n) { largest_n = n; cam2largest = msk; }
          mask += msk;
        }
      }
    }

    //////////////////////////////////////////////////////////////////

    // apply hough transform ////////////////////////////////////
    //lineImage = *(new Image<byte>(largest.getDims(), ZEROS));

    // This is a little more complicated in that I need to see the camera
    // outputs, since the Hough transform is not consistent. The pixel
    // method is probably all we need for Task B, and I suggest we use the
    // SIFT method for Task A. I will complete this at competition, when I
    // have access to cameras, and if it seems necessary after thorough
    // testing of the other algorithms. I am concerned whether SIFT will
    // work for Task A, but we will just have to see; maybe the pixel
    // method will work for Task A as well. Here is an outline of how this
    // algorithm should work:
    /* Select one line from the lines in the left camera image. Check
       whether it is the left or right edge, based on whether a black pixel
       resides to its left or right. Look for a similar line in the right
       image. If you find one, find the equations of both lines, find the
       x value of each line at y = 0, do a disparity calculation, and get
       the distance between those interpolated points. If you do not find
       the same line, pick a different line in the left camera image and
       repeat. The problem I have with this method is that it is very
       specific to Task B; I think the pixel method can be generalized
       enough for other things. */
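    // A minimal sketch of the line-matching approach outlined above,
    // assuming the usual pinhole-stereo relation. The helper below is
    // purely illustrative and is not wired into the pipeline: given the
    // endpoints of the same physical edge as detected in each camera, it
    // interpolates each line's x coordinate at y = 0 and converts the
    // difference of the intercepts into a depth, using the baseline and
    // focal length quoted elsewhere in this file.
    struct LineDisparitySketch {
      // x coordinate of the line through (x1,y1)-(x2,y2) at y = 0
      static float xAtYZero(float x1, float y1, float x2, float y2) {
        const float dy = y2 - y1;
        if (fabs(dy) < 1e-6F) return x1; // nearly horizontal: no unique intercept
        return x1 - (x2 - x1) / dy * y1;
      }
      // depth from the disparity of the two interpolated intercepts
      static float depth(float xLeft, float xRight) {
        const float disparity = fabs(xLeft - xRight);
        if (disparity <= 0.0F) return -1.0F; // coincident lines: no depth
        return (0.3125F * 419.5509189F) / disparity; // baseline * focal / disparity
      }
    };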
    /* vector<LineSegment2D> lines, lines2;
    unsigned char *edge, *edge2;
    char* file1 = NULL;

    canny(camlargest.getArrayPtr(), camlargest.getHeight(), camlargest.getWidth(), 0.7, 0.2, 0.97, &edge, file1);
    canny(cam2largest.getArrayPtr(), cam2largest.getHeight(), cam2largest.getWidth(), 0.7, 0.2, 0.97, &edge2, file1);

    Image<unsigned char> edgeImage(edge, camlargest.getWidth(), camlargest.getHeight());
    Image<unsigned char> edgeImage2(edge2, cam2largest.getWidth(), cam2largest.getHeight());

    lines = houghTransform(edgeImage, PI/180, 1, 50, cameraImage);
    lines2 = houghTransform(edgeImage2, PI/180, 1, 50, cameraImage2);
    SweepPoint pair, pair2;
    for(int i = 0; i < lines.size(); i++) {
      Point2D<int> testPoint = lines[i].point1();
      if(camlargest.coordsOk(testPoint.i + 5, testPoint.j)) {
        pair.currentIntensity = camlargest.getVal(testPoint.i + 5, testPoint.j);
        break;
      }
    }

    for(int i = 0; i < lines.size(); i++) {
      Point2D<int> testPoint = lines[i].point1();
      if(cam2largest.coordsOk(testPoint.i + 5, testPoint.j) && pair.currentIntensity == cam2largest.getVal(testPoint.i + 5, testPoint.j)) {
        // solve for equation: same line found
      }
      else { // look for another line
      }
    } */
    ///////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////
    // pixel approach:
    // look for valid scanlines, i.e. we will only sweep through the y
    // positions whose leftmost pixel has the same intensity in both the
    // left and right camera images.
    vector<SweepPoint> validPixels;
    for(int j = 0; j < camlargest.getHeight(); j++) {
      if(camlargest.getVal(0, j) == cam2largest.getVal(0, j)) {
        SweepPoint valid;
        valid.currentIntensity = camlargest.getVal(0, j);
        valid.targetIntensity = (valid.currentIntensity == 255) ? 0 : 255;
        valid.ypos = j;
        valid.xpos = -1;
        valid.xpos2 = -1;
        validPixels.push_back(valid);
      }
    }
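    // The sweep loop just below scans every column unconditionally; as its
    // note says, it could stop early once every valid scanline has found
    // its edge in both images. A minimal sketch of that early-exit variant
    // (illustrative only, not used here; "remaining" counts the x
    // positions still unfound):
    /*
    int remaining = 2 * (int)validPixels.size(); // unfound xpos + xpos2
    for(int i = 0; i < camlargest.getWidth() && remaining > 0; i++) {
      for(unsigned int j = 0; j < validPixels.size(); j++) {
        if(validPixels[j].xpos == -1 && camlargest.getVal(i, validPixels[j].ypos) == validPixels[j].targetIntensity)
          { validPixels[j].xpos = i; remaining--; }
        if(validPixels[j].xpos2 == -1 && cam2largest.getVal(i, validPixels[j].ypos) == validPixels[j].targetIntensity)
          { validPixels[j].xpos2 = i; remaining--; }
      }
    }
    */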
    // Sweep through both images; where the pixel intensity changes to the
    // target intensity we have found the edge of the object, so record the
    // x position at which it was found in each image.
    // note: for optimization we should stop either when we reach the end
    // of the image OR when all x positions have been found
    for(int i = 0; i < camlargest.getWidth(); i++) {
      for(unsigned int j = 0; j < validPixels.size(); j++) {
        if(validPixels[j].xpos == -1 && camlargest.getVal(i, validPixels[j].ypos) == validPixels[j].targetIntensity)
          validPixels[j].xpos = i;

        if(validPixels[j].xpos2 == -1 && cam2largest.getVal(i, validPixels[j].ypos) == validPixels[j].targetIntensity)
          validPixels[j].xpos2 = i;
      }
    }

    //int xavg, xavg2;
    float disparityAvg = 0.0;
    float sum = 0.0, mean = 0.0;
    Image< PixRGB<byte> > cross(camlargest);
    Image< PixRGB<byte> > cross2(cam2largest);

    // find the mean disparity and its standard deviation
    if(!validPixels.empty()) {
      vector<SweepPoint>::iterator iter = validPixels.begin();
      while(iter != validPixels.end()) {
        if((*iter).xpos != -1 && (*iter).xpos2 != -1) {
          xMatchDist = (*iter).xpos2 - (*iter).xpos;
          sum += (*iter).disparity = fabs(xMatchDist);
          iter++;
        }
        else // if the disparity is not computable, remove the entry
          iter = validPixels.erase(iter);
      }

      mean = sum / validPixels.size();

      float stdsum = 0.0, stdsqr = 0.0;
      for (uint i = 0; i < validPixels.size(); i++) {
        stdsum = validPixels[i].disparity - mean;
        stdsqr += (stdsum*stdsum);
      }

      float stddev = stdsqr / validPixels.size();
      stddev = (float)sqrt((double)stddev);

      // kick out the disparities that lie more than one standard
      // deviation from the mean, and average those that remain
      iter = validPixels.begin();
      while(iter != validPixels.end()) {
        if((*iter).disparity > (mean + stddev) || (*iter).disparity < (mean - stddev))
          iter = validPixels.erase(iter);
        else {
          drawCross(cross, Point2D<int>((*iter).xpos, (*iter).ypos), PixRGB<byte>(0, 255, 0), 3);
          drawCross(cross2, Point2D<int>((*iter).xpos2, (*iter).ypos), PixRGB<byte>(0, 255, 0), 3);
          disparityAvg += (float)(*iter).disparity;
          iter++;
        }
      }

      // averaged disparity; depth is calculated later below
      if (!validPixels.empty())
        disparityAvg = disparityAvg / validPixels.size();
      else // everything was pruned as an outlier
        disparityAvg = -1.0;

      //xavg = validPixels[0].xpos;
      //xavg2 = validPixels[0].xpos2;
    }
    else // dummy value: no edge was found
      disparityAvg = -1.0;
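    // Hedged sketch of the depth recovery performed below: standard
    // pinhole stereo gives depth = (baseline * focalLength) / disparity.
    // With the constants used in this file (baseline 3 3/4 in = 0.3125 ft,
    // focal length 419.5509189 pixel units), a disparity of 20 pixels
    // would give depth = (0.3125 * 419.5509189) / 20 ~= 6.6. This local
    // helper only restates the arithmetic that the code below inlines; it
    // is illustrative and not called anywhere.
    struct DepthFromDisparitySketch {
      static float depth(float disparityPixels) {
        if (disparityPixels <= 0.0F) return -1.0F; // no valid disparity
        return (0.3125F * 419.5509189F) / disparityPixels;
      }
    };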
    xwin->drawImage(cross);
    xwin2->drawImage(cross2);
    /* lineImage = *(new Image<byte>(largest.getDims(), ZEROS));

    int xavg2, yavg2;
    int xcount2, ycount2;
    int pixcount2;
    xcount2 = ycount2 = 0;
    xavg2 = yavg2 = 0;
    pixcount2 = 0;
    for(int j = 0; j < largest.getHeight(); j++) {
      for(int i = 0; i < largest.getWidth(); i++) {
        if(largest.getVal(i, j) == 255) {
          pixcount2++;
          ycount2 += j;
          xcount2 += i;
        }
      }
    }

    xavg2 = xcount2 / pixcount2;
    yavg2 = ycount2 / pixcount2;
    */
    // drawCross(cameraImage2, Point2D<int>(xavg2, yavg2), PixRGB<byte>(0, 255, 0), 10);

    //xwin2->drawImage(cameraImage2);
    //int xavg2 = 0;

    // track the extreme depths seen this frame (max starts low, min high)
    float max, min;
    max = 0;
    min = 15;

    //xMatchDist = xavg - xavg2;
    //xMatchDist = fabs(xMatchDist);
    // baseline = 3 3/4 in = 0.3125 ft
    // focal length = 419.5509189 pixel units
    // depth = baseline * focal / (xl - xr)
    float depth = (0.3125 * 419.5509189) / disparityAvg;
    // an empirical model of the depth error, subtracted out when small
    cout << "depth: " << depth << endl;
    float error = 0.1 * exp(0.275 * depth);

    cout << "error: " << error << endl;
    if(error <= 4)
      depth -= error;

    // clamp to [0, 15]; a negative depth comes from the dummy disparity
    // of -1.0 when no edge was found
    if(depth > 15 || depth < 0)
      depth = 15;

    int pixelDepth = (int)(255 - depth * 13);

    if(depth > max && depth < 15)
      max = depth;

    if(depth < min)
      min = depth;

    cout << "new depth: " << depth << endl;
    cout << "pixel depth: " << pixelDepth << endl;

    /* for(int j = -5; j <= 5; j++)
      for(int k = -5; k <= 5; k++)
        depthMap.setVal(xavg + j, yavg + k, (int)(255 - depth * 13)); */

    //drawMark(cameraImage2, xavg, yavg, (int)xMatchDist, (255 - depth * 13));

    Raster::waitForKey();
  } //////////////////////////////// -- end disparity by color isolation //////////////////////////////

  return 0;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */