/*!@file InferotemporalCortexI.C Recognize Objects */

//////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
//                                                                      //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Lior Elazary <lelazary@yahoo.com>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Robots/RobotBrain/InferotemporalCortexI.C $
// $Id: InferotemporalCortexI.C 10794 2009-02-08 06:21:09Z itti $
//
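// Overview: this Ice/IceStorm component subscribes to AttendedRegionMessages,
// builds SIFT-based VisualObjects from the attended image region, and either
// learns them into a VisualObjectDB (training mode, entered when the message
// carries an object name) or matches them against the database and publishes
// the recognized objects as a LandmarksMessage (name, bearing, range).
//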

#include "Component/ModelManager.H"
#include "Image/Image.H"
#include "Image/DrawOps.H"
#include "GUI/DebugWin.H"
#include "Robots/RobotBrain/InferotemporalCortexI.H"


#include "Ice/IceImageUtils.H"

// ######################################################################
InferotemporalCortexI::InferotemporalCortexI(OptionManager& mgr,
    const std::string& descrName, const std::string& tagName) :
  ModelComponent(mgr, descrName, tagName),
  itsObjectMessage(new RobotSimEvents::ObjectMessage),
  itsAttendedRegionMessage(new RobotSimEvents::AttendedRegionMessage),
  itsTrainingRegionMessage(new RobotSimEvents::AttendedRegionMessage),
  itsUseColor(false)
{
  itsObjectMessage->id = -1;
  itsAttendedRegionMessage->objId = -1;
  itsAttendedRegionMessage->name.clear();
  itsAttendedRegionMessage->img.width = -1;
  itsAttendedRegionMessage->img.height = -1;

  itsTrainingRegionMessage->objId = -1;
  itsTrainingRegionMessage->name.clear();
  itsTrainingRegionMessage->img.width = -1;
  itsTrainingRegionMessage->img.height = -1;

  itsOfs = nub::ref<OutputFrameSeries>(new OutputFrameSeries(mgr));
  addSubComponent(itsOfs);

  initVDB();

  itsTrainingMode = false;

  // Parameters
  // Camera: the focal length (in pixels) is calibrated from a test object of
  // known width at a known distance; the formula below presumes the test
  // object spanned the full image width at that distance
  itsCurrentCameraParam.dims = Dims(320,240);
  float testObjWidth = 42;
  float testObjDistanceFromCam = 37;
  itsCurrentCameraParam.focalLength = (itsCurrentCameraParam.dims.w()*testObjDistanceFromCam)/testObjWidth;
  itsCurrentCameraParam.yaw = -0.5*M_PI/180.0;

  LINFO("Focal length %f",
      itsCurrentCameraParam.focalLength);


  // Match threshold
  itsMatchThresh = 4U * 4U;
}

// ######################################################################
InferotemporalCortexI::~InferotemporalCortexI()
{
  SimEventsUtils::unsubscribeSimEvents(itsTopicsSubscriptions, itsObjectPrx);
}

// ######################################################################
void InferotemporalCortexI::init(Ice::CommunicatorPtr ic, Ice::ObjectAdapterPtr adapter)
{
  Ice::ObjectPtr objPtr = this;
  itsObjectPrx = adapter->add(objPtr,
      ic->stringToIdentity("InferotemporalCortex"));


  IceStorm::TopicPrx topicPrx;

  itsTopicsSubscriptions.push_back(SimEventsUtils::TopicInfo("AttendedRegionMessageTopic", topicPrx));

  SimEventsUtils::initSimEvents(ic, itsObjectPrx, itsTopicsSubscriptions);

  itsEventsPub = RobotSimEvents::EventsPrx::uncheckedCast(
      SimEventsUtils::getPublisher(ic, "LandmarksMessageTopic")
      );

  IceUtil::ThreadPtr thread = this;
  thread->start();

  usleep(10000);
}


bool InferotemporalCortexI::initVDB()
{
  itsUseColor = false;
  itsVDBFile = "objects.vdb";
  LINFO("Loading VDB File: %s", itsVDBFile.c_str());
  itsVDB.loadFrom(itsVDBFile);
  LINFO("VDB File Loaded");

  return true;
}

// ######################################################################
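// run() is the IceUtil::Thread entry point started from init(): it just calls
// evolve() in a loop, sleeping ~10ms between iterations.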
void InferotemporalCortexI::run()
{

  while(1)
  {
    evolve();
    usleep(10000);
  }

}


// ######################################################################
void InferotemporalCortexI::updateMessage(const RobotSimEvents::EventMessagePtr& eMsg,
    const Ice::Current&)
{

  // Got an attended-region message
  if(eMsg->ice_isA("::RobotSimEvents::AttendedRegionMessage"))
  {

    // Don't update any more images if we are training
    if (itsTrainingMode)
      return;
    RobotSimEvents::AttendedRegionMessagePtr arMsg = RobotSimEvents::AttendedRegionMessagePtr::dynamicCast(eMsg);

    {

      if (arMsg->name.size() && !itsTrainingMode)
      {
        itsTrainingMode = true;
        itsTrainingRegionMessage = arMsg;
      } else {
        itsAttendedRegionMessage = arMsg;
      }


    }

  }
}



// ######################################################################
void InferotemporalCortexI::evolve()
{
  std::vector< rutz::shared_ptr<VisualObjectMatch> > matches;

  IceUtil::Mutex::Lock lock(itsARMutex); // lock this call so we can work with the AR message safely

  if (itsAttendedRegionMessage->img.width == -1 ||
      itsAttendedRegionMessage->img.height == -1)
    return;


  Image<PixRGB<byte> > arImg;

  if(itsTrainingMode)
  {
    arImg = Ice2Image<PixRGB<byte> >(itsTrainingRegionMessage->img);

    if (!arImg.initialized())
      return;


    rutz::shared_ptr<VisualObject>
      imageVO(new VisualObject("Retina Image", "NULL", arImg,
            Point2D<int>(-1,-1),
            std::vector<double>(),
            std::vector< rutz::shared_ptr<Keypoint> >(),
            itsUseColor));


    Image<PixRGB<byte> > keyImg = imageVO->getKeypointImage(1);

    // Loop through all of the keypoints in the image VisualObject, and push
    // the ones that fall in the attended region into this keypoints vector
    std::vector< rutz::shared_ptr<Keypoint> > keypoints;
    std::vector< rutz::shared_ptr<Keypoint> > imgKeypoints = imageVO->getKeypoints();

    // Construct a rectangle to represent the selected region from LGN
    Dims attDims(
        itsTrainingRegionMessage->attWidth,
        itsTrainingRegionMessage->attHeight
        );

    Point2D<int> attTopLeft(
        itsTrainingRegionMessage->attTopLeft.i,
        itsTrainingRegionMessage->attTopLeft.j
        );

    LINFO("Dims: %s TL: %s", convertToString(attDims).c_str(), convertToString(attTopLeft).c_str());

    Rectangle attRegion(attTopLeft, attDims);
    drawRect(keyImg, attRegion, PixRGB<byte>(255,0,255), 2);

    keypoints.clear();

    for(uint i=0; i<imgKeypoints.size(); i++)
    {
      Point2D<int> keyPt(imgKeypoints.at(i)->getX(), imgKeypoints.at(i)->getY());

      if(attRegion.contains(keyPt))
      {
        drawCircle(keyImg, keyPt, 2, PixRGB<byte>(0,0,255));
        rutz::shared_ptr<Keypoint> k(
            new Keypoint(
              imgKeypoints.at(i)->getOriFV(),
              imgKeypoints.at(i)->getX() - attTopLeft.i,
              imgKeypoints.at(i)->getY() - attTopLeft.j,
              imgKeypoints.at(i)->getS(),
              imgKeypoints.at(i)->getO(),
              imgKeypoints.at(i)->getM()
              )
            );
        keypoints.push_back(k);
      }
    }

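    // Build the object to learn from the attended region only: crop() extracts
    // the sub-image, and the keypoints collected above were already re-based to
    // attTopLeft so their coordinates are relative to that crop. The trailing
    // 'false' presumably suppresses recomputation of keypoints, so only the
    // keypoints found inside the attended region are stored with the object.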
    rutz::shared_ptr<VisualObject>
      vo(new VisualObject("Retina Image",
            "NULL",
            crop(arImg, attTopLeft, attDims),
            Point2D<int>(-1,-1),
            std::vector<double>(),
            keypoints,
            itsUseColor,
            false
            )
         );

    // Object size in mm
    Dims objSize(
        (int)((float)itsTrainingRegionMessage->objWidth  * itsCurrentCameraParam.focalLength / 1000),
        (int)((float)itsTrainingRegionMessage->objHeight * itsCurrentCameraParam.focalLength / 1000));


    learnObject(vo, itsVDB.numObjects(), itsTrainingRegionMessage->name, objSize);
    LINFO("Learned Object:Id = %d, Name=%s Size=%ix%i",
        itsVDB.numObjects(),
        itsTrainingRegionMessage->name.c_str(),
        objSize.w(), objSize.h());


    itsOfs->writeRGB(keyImg, "VObject", FrameInfo("VObject", SRC_POS));

    itsTrainingMode = false;
  }
  else
  {
    arImg = Ice2Image<PixRGB<byte> >(itsAttendedRegionMessage->img);

    if (!arImg.initialized())
      return;

    rutz::shared_ptr<VisualObject>
      vo(new VisualObject("Retina Image", "NULL", arImg,
            Point2D<int>(-1,-1),
            std::vector<double>(),
            std::vector< rutz::shared_ptr<Keypoint> >(),
            itsUseColor));

    Image<PixRGB<byte> > keyImg = vo->getKeypointImage(1);

    RobotSimEvents::LandmarksMessagePtr landmarksMessage = new RobotSimEvents::LandmarksMessage;

    // Remember: Ref is the retina image, and Test is the matched database object

    // Search for landmarks in the scene
    itsVDB.getObjectMatches(vo,matches);

    for(uint i=0; i<matches.size(); i++)
    {
      matches.at(i)->prune();

      if(matches.at(i)->checkSIFTaffine(3.14/4.0, 30.0, 1.0)) {
        std::string objectName = matches.at(i)->getVoTest()->getName();

        Point2D<int> tl, tr, br, bl;

        // Compute the outline of the matched object
        matches.at(i)->getTransfTestOutline(tl,tr,br,bl);

        // Draw the outline of the recognized object for debugging
        drawLine(keyImg, tl, tr, PixRGB<byte>(0,0,255));
        drawLine(keyImg, tr, br, PixRGB<byte>(0,0,255));
        drawLine(keyImg, br, bl, PixRGB<byte>(0,0,255));
        drawLine(keyImg, bl, tl, PixRGB<byte>(0,0,255));

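        // Bearing/range notes (inferred from the code below, not from an
        // external spec): the bearing follows the pinhole model,
        //   bearing = atan((imageCenterX - landmarkX) / focalLength) + cameraYaw,
        // so objects left of the image center get positive bearings. The
        // "range" is the mean of the x/y scale factors of the recovered SIFT
        // affine transform, i.e. a relative scale between the stored object
        // and its appearance in the image rather than a metric distance.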
        // Compute the bearing to the object
        int landmarkX = (tl.i + tr.i + br.i + bl.i)/4;
        float bearing = atan( ((itsCurrentCameraParam.dims.w()/2) - landmarkX) / itsCurrentCameraParam.focalLength );
        bearing += itsCurrentCameraParam.yaw;

        // Compute the range of the object
        float theta, sx, sy, str;
        matches.at(i)->getSIFTaffine().decompose(theta, sx, sy, str);
        float range = (sx + sy)/2.0;



        // Construct the landmarkInfo to send out
        RobotSimEvents::LandmarkInfo currLandmarkInfo;
        currLandmarkInfo.id = -1;
        currLandmarkInfo.name = objectName;
        currLandmarkInfo.prob = -1;
        currLandmarkInfo.range = range;
        currLandmarkInfo.bearing = bearing;

        // Push the landmark onto the list of landmarks to report
        landmarksMessage->landmarks.push_back(currLandmarkInfo);

        writeText(keyImg, tl,
            objectName.c_str(),
            PixRGB<byte>(0,0,0),
            PixRGB<byte>(255,255,255),
            SimpleFont::FIXED(6));
        // writeText(keyImg, Point2D<int>(tl.i, tl.j+10),
        //     ("r:"+toStr<float>(range)).c_str(),
        //     PixRGB<byte>(0,0,0),
        //     PixRGB<byte>(255,255,255),
        //     SimpleFont::FIXED(6));
        // writeText(keyImg, Point2D<int>(tl.i, tl.j+20),
        //     ("b:"+toStr<float>(bearing)).c_str(),
        //     PixRGB<byte>(0,0,0),
        //     PixRGB<byte>(255,255,255),
        //     SimpleFont::FIXED(6));
      }
    }
    itsEventsPub->updateMessage(landmarksMessage);

    writeText(keyImg, Point2D<int>(0,0),
        (toStr<int>(matches.size()) + " matches").c_str(),
        PixRGB<byte>(0,0,0),
        PixRGB<byte>(255,255,255),
        SimpleFont::FIXED(6));


    itsOfs->writeRGB(keyImg, "VObject", FrameInfo("VObject", SRC_POS));

  }



  // rutz::shared_ptr<VisualObjectMatch>
  //   match(new VisualObjectMatch(vo, itsPrevVObj, VOMA_SIMPLE, 5U));

  // Image<PixRGB<byte> > matchImg = match->getMatchImage(1);
  // itsOfs->writeRGB(matchImg, "MatchImg", FrameInfo("MatchImg", SRC_POS));

  // RobotSimEvents::LandmarksMessagePtr landmarksMessage = new RobotSimEvents::LandmarksMessage;

  // // Treat each keypoint as a landmark
  // uint nkp = vo->numKeypoints();
  // LDEBUG("Found %i keypoints\n", nkp);
  // for(uint kpIdx=0; kpIdx<nkp; kpIdx++)
  // {
  //   rutz::shared_ptr<Keypoint> keyPoint = vo->getKeypoint(kpIdx);

  //   RobotSimEvents::LandmarkInfo landmarkInfo = getLandmarkInfo(keyPoint);

  //   // Show the found landmark as green point
  //   if (landmarkInfo.prob != -1)
  //     drawCircle(keyImg,
  //         Point2D<int>((int)keyPoint->getX(), (int)keyPoint->getY()), 3, PixRGB<byte>(0,255,0));
  //   landmarksMessage->landmarks.push_back(landmarkInfo);
  // }

  // itsOfs->writeRGB(keyImg, "VObject", FrameInfo("VObject", SRC_POS));

  // LDEBUG("Send %i landmarks", (int)landmarksMessage->landmarks.size());
  // itsEventsPub->updateMessage(landmarksMessage);
}



RobotSimEvents::LandmarkInfo InferotemporalCortexI::getLandmarkInfo(rutz::shared_ptr<Keypoint> keyPoint)
{
  float x = keyPoint->getX();
  //float y = keyPoint->getY();
  float bearing = atan( ((itsCurrentCameraParam.dims.w()/2) - x) / itsCurrentCameraParam.focalLength );

  // TODO use ICE smart pointers
  RobotSimEvents::LandmarkInfo landmarkInfo;
  float prob;
  landmarkInfo.id = findKeypointID(keyPoint, prob);
  landmarkInfo.prob = prob;
  landmarkInfo.range = -1;
  landmarkInfo.bearing = bearing;

  //LINFO("Key %i, prob %f", landmarkInfo.id, landmarkInfo.prob);
  return landmarkInfo;

}

int InferotemporalCortexI::findKeypointID(rutz::shared_ptr<Keypoint> keyPoint, float &prob)
{

  // If we have an empty database then init it with this keypoint
  if (itsKeypointsDB.size() == 0)
  {
    itsKeypointsDB.push_back(keyPoint);
    return itsKeypointsDB.size()-1;
  }

  // Search for the keypoint in the database
  std::vector<rutz::shared_ptr<Keypoint> >::const_iterator
    keyDBIter = itsKeypointsDB.begin(),
    bestMatch = itsKeypointsDB.begin(),
    keyDBStop = itsKeypointsDB.end();


  int distsq1 = (*keyDBIter)->maxDistSquared();
  int distsq2 = (*keyDBIter)->maxDistSquared();

  int id = 0;
  int bestID = 0;
  while(keyDBIter != keyDBStop)
  {
    const int distsq = (*keyDBIter)->distSquared(keyPoint);

    // is this better than our best one?
    if (distsq < distsq1)
    {
      distsq2 = distsq1;     // old best becomes second best
      distsq1 = distsq;      // we got a new best
      bestMatch = keyDBIter; // remember the best keypoint
      bestID = id;
    }
    else if (distsq < distsq2) // maybe between best and second best?
      distsq2 = distsq;

    ++keyDBIter;
    ++id;
  }

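  // Decide whether the query keypoint matches the best database entry. The
  // commented-out test below is the usual SIFT-style ratio test (best distance
  // sufficiently smaller than the second-best); the active code instead uses a
  // fixed absolute threshold on the squared descriptor distance. Note that
  // 'prob' is set to that squared distance (smaller is better), not to a true
  // probability.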
  // Check that best distance less than thresh of second best distance:
  //if (100U * distsq1 < itsMatchThresh * distsq2)
  if ( distsq1 < 100000 )
  {
    prob = distsq1;
    return (int)(bestMatch - itsKeypointsDB.begin());
  } else {
    // No good match: add the keypoint to the database as a new entry
    itsKeypointsDB.push_back(keyPoint);
    prob = -1;
    return itsKeypointsDB.size()-1;
  }

  return -1;


}


void InferotemporalCortexI::learnObject(rutz::shared_ptr<VisualObject> vo, const int objId, const std::string name, const Dims objSize)
{
  vo->setName(name);
  vo->setImageFname("NULL");
  vo->setObjectSize(objSize);
  itsVDB.addObject(vo, false); // allow multiple objects with the same name
  itsVDB.saveTo(itsVDBFile);
}


void InferotemporalCortexI::findObjects(const rutz::shared_ptr<VisualObject> vo)
{
  std::vector< rutz::shared_ptr<VisualObjectMatch> > matches;

  Image<PixRGB<byte> > keyImg = vo->getKeypointImage(1);
  //Image<PixRGB<byte> > keyImg = vo->getImage();

  const uint nmatches = itsVDB.getObjectMatches(vo, matches, VOMA_SIMPLE,
      100U,  // max objs to return
      0.5F,  // keypoint distance score (default 0.5F)
      0.5F,  // affine distance score (default 0.5F)
      1.0F,  // minscore (default 1.0F)
      3U,    // min # of keypoint matches
      100U,  // keypoint selection threshold
      false  // sort by preattentive score
      );

  float score = 0;
  float avgScore = 0, affineAvgDist = 0;
  int nkeyp = 0;
  int objId = -1;
  if (nmatches > 0)
  {
    rutz::shared_ptr<VisualObject> obj; // keep a ref to the last matched obj
    rutz::shared_ptr<VisualObjectMatch> vom;
    for (unsigned int i = 0; i < 1; ++i)
    {
      vom = matches[i];
      obj = vom->getVoTest();
      score = vom->getScore();
      nkeyp = vom->size();
      avgScore = vom->getKeypointAvgDist();
      affineAvgDist = vom->getAffineAvgDist();

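      // The numeric id is parsed from the object name by skipping its first
      // three characters; this presumes names of the form "obj<N>" (e.g. a
      // hypothetical "obj12" would yield id 12). That prefix convention is an
      // assumption here, not something enforced by this file.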
      objId = atoi(obj->getName().c_str()+3);


      // Calculate the actual distance between the matched keypoint locations.
      // If the same patch was found, then the distance should be close to 0
      double dist = 0;
      for (int keyp=0; keyp<nkeyp; keyp++)
      {
        const KeypointMatch kpm = vom->getKeypointMatch(keyp);

        float refX = kpm.refkp->getX();
        float refY = kpm.refkp->getY();

        LDEBUG("Point %f,%f",
            refX, refY);
        drawCircle(keyImg, Point2D<int>((int)refX, (int)refY), 3, PixRGB<byte>(0,255,0));

        float tstX = kpm.tstkp->getX();
        float tstY = kpm.tstkp->getY();
        dist += (refX-tstX) * (refX-tstX);
        dist += (refY-tstY) * (refY-tstY);
      }

      // Send ObjectInfo
      LDEBUG("### Object match with '%s' score=%f ID:%i Dist:%f",
          obj->getName().c_str(), vom->getScore(), objId, dist);

      Point2D<int> tl, tr, br, bl;
      vom->getTransfTestOutline(tl, tr, br, bl);

      itsObjectMessage->id = objId;
      itsObjectMessage->name = obj->getName();
      itsObjectMessage->score = vom->getScore();
      itsObjectMessage->nKeyp = nkeyp;
      itsObjectMessage->dist = dist;
      itsObjectMessage->tl.i = tl.i; itsObjectMessage->tl.j = tl.j;
      itsObjectMessage->tr.i = tr.i; itsObjectMessage->tr.j = tr.j;
      itsObjectMessage->br.i = br.i; itsObjectMessage->br.j = br.j;
      itsObjectMessage->bl.i = bl.i; itsObjectMessage->bl.j = bl.j;

      itsEventsPub->updateMessage(itsObjectMessage);

      // Draw the outline of the matched object
      drawLine(keyImg, tl, tr, PixRGB<byte>(0, 255, 0));
      drawLine(keyImg, tr, br, PixRGB<byte>(0, 255, 0));
      drawLine(keyImg, br, bl, PixRGB<byte>(0, 255, 0));
      drawLine(keyImg, bl, tl, PixRGB<byte>(0, 255, 0));

      // printf("%i:%s %i %f %i %f %f %f\n", objNum, obj->getName().c_str(),
      //     nmatches, score, nkeyp, avgScore, affineAvgDist, sqrt(dist));

    }
  }
  itsOfs->writeRGB(keyImg, "VObject", FrameInfo("VObject", SRC_POS));
}

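
// ######################################################################
// Rough usage sketch (illustrative only; the actual setup lives in the
// robot-brain driver program, and the communicator/adapter variables below
// are placeholders rather than names defined in this file):
//
//   ModelManager mgr("InferotemporalCortexService");
//   nub::ref<InferotemporalCortexI> itc(
//     new InferotemporalCortexI(mgr, "InferotemporalCortex", "InferotemporalCortex"));
//   mgr.addSubComponent(itc);
//   mgr.start();
//   itc->init(ic, adapter); // subscribes to IceStorm topics and starts the run() thread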