test-multiTrack.C

/*!@file VFAT/test-multiTrack.C Test IEEE1394 frame grabbing and X display */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file:  T. Nathan Mundhenk <mundhenk@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/VFAT/test-multiTrack.C $
// $Id: test-multiTrack.C 14376 2011-01-11 02:44:34Z pez $
//


#include "Component/ModelManager.H"
#include "Component/ModelOptionDef.H"
#include "Devices/CameraControl.H"
#include "Devices/DeviceOpts.H"
#include "Devices/FrameGrabberFactory.H"
#include "GUI/XWindow.H"
#include "VFAT/segmentImageMerge.H"
#include "rutz/shared_ptr.h"

//#include <pthread.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>

// number of frames over which framerate info is averaged:
#define NAVG 20

int main(const int argc, const char **argv)
{

  // instantiate a model manager:
  ModelManager manager("Multi Frame Grabber Tester");
  manager.allowOptions(OPTEXP_NONE);

  // Instantiate our various ModelComponents:

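  // One IEEE1394 frame grabber per camera: each grabber is bound to its own
  // sub-channel (camera) on the FireWire bus via "FrameGrabberSubChan".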
  nub::soft_ref<FrameIstream> gb1(makeIEEE1394grabber(manager));
  gb1->setModelParamVal("FrameGrabberSubChan", 0);
  //gb1->setModelParamVal("FrameGrabberGamma", 2);
  //gb1->setModelParamVal("framegrabber-whiteness", 1);

  nub::soft_ref<FrameIstream> gb2(makeIEEE1394grabber(manager));
  gb2->setModelParamVal("FrameGrabberSubChan", 1);
  //gb2->setModelParamVal("FrameGrabberGamma", 2);
  //gb2->setModelParamVal("framegrabber-whiteness", 100);

  nub::soft_ref<FrameIstream> gb3(makeIEEE1394grabber(manager));
  gb3->setModelParamVal("FrameGrabberSubChan", 2);
  //gb3->setModelParamVal("FrameGrabberGamma", 2);

  nub::soft_ref<FrameIstream> gb4(makeIEEE1394grabber(manager));
  gb4->setModelParamVal("FrameGrabberSubChan", 3);
  //gb3->setModelParamVal("framegrabber-whiteness", 1000);

  //gb3->setModelParamVal("FrameGrabberChannel", 1);

  manager.addSubComponent(gb1);
  manager.addSubComponent(gb2);
  manager.addSubComponent(gb3);
  manager.addSubComponent(gb4);


  // we don't want people messing around with some of our options; so
  // let's selectively export only those they can play with:
  manager.allowOptions(OPTEXP_ALL);
  manager.doRequestOption(&OPT_FrameGrabberDims);
  manager.doRequestOption(&OPT_FrameGrabberMode);
  manager.doRequestOption(&OPT_FrameGrabberFPS);
  manager.doRequestOption(&OPT_FrameGrabberNbuf);
  manager.allowOptions(OPTEXP_NONE);

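  // One CameraControl per pan/tilt camera head. The trailing numeric
  // arguments differ for each head (presumably the pan/tilt servo channel
  // assignments); see Devices/CameraControl.H for their exact meaning.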
  nub::soft_ref<CameraControl>
    camera1(new CameraControl(manager, "Camera Controller", "CameraControl",
                             0, true, 0, 1, 1));
  nub::soft_ref<CameraControl>
    camera2(new CameraControl(manager, "Camera Controller", "CameraControl",
                             0, true, 2, 3, 1));
  nub::soft_ref<CameraControl>
    camera3(new CameraControl(manager, "Camera Controller", "CameraControl",
                             0, true, 4, 5, 1));
  nub::soft_ref<CameraControl>
    camera4(new CameraControl(manager, "Camera Controller", "CameraControl",
                             0, true, 6, 7, 1));

  manager.addSubComponent(camera1);
  manager.addSubComponent(camera2);
  manager.addSubComponent(camera3);
  manager.addSubComponent(camera4);

  // Parse command-line:
  if (manager.parseCommandLine(argc, argv, "", 0, 0) == false) return(1);

  // do post-command-line configs:
  //nub::soft_ref<FrameIstream> gb1 = gbc1->getFrameGrabber();
  //nub::soft_ref<FrameIstream> gb2 = gbc2->getFrameGrabber();
  /*if (gb1.isInvalid())
    LFATAL("You need to select a frame grabber type via the "
           "--fg-type=XX command-line option for this program "
           "to be useful");
  if (gb2.isInvalid())
    LFATAL("You need to select a frame grabber type via the "
           "--fg-type=XX command-line option for this program "
           "to be useful");*/
  int width = gb1->getWidth(), height = gb1->getHeight();
  float delay[4];
  delay[0] = 0; delay[1] = 0; delay[2] = 0; delay[3] = 0;

  // let's get all our ModelComponent instances started:
  manager.start();
  XWindow wini1(Dims(width, height), 0, 0, "test-input window 1");
  XWindow wini2(Dims(width, height), 0, 0, "test-input window 2");
  XWindow wini3(Dims(width, height), 0, 0, "test-input window 3");
  XWindow wini4(Dims(width, height), 0, 0, "test-input window 4");

  XWindow roomOver(Dims(400,400),0,0,"Room Overhead");
  XWindow roomFront(Dims(400,400),0,0,"Room Front");
  Image<PixRGB<byte> > overhead;
  Image<PixRGB<byte> > front;


  Timer tim;

  Image< PixRGB<float> > fima;

  std::vector< Image< PixRGB<byte> > > ima;
  ima.resize(4);

  std::vector< Image< PixRGB<byte> > > display;
  display.resize(4);

  Timer camPause[4];       // to pause the move command
  camPause[0].reset();
  camPause[1].reset();
  camPause[2].reset();
  camPause[3].reset();

  uint64 t[NAVG]; int frame = 0;

  // Create tracker and state how many trackers will be used
  segmentImageMerge segmenter(4);

  //****************************
  // set up tracking parameters
  //****************************

  // HSV mean color to start with and standard deviation
  //segmenter.setTrackColor(12,8,0.23,0.08,135,35,0,true,15);
  //segmenter.setTrackColor(12,8,0.23,0.08,135,35,1,true,15);
  //segmenter.setTrackColor(12,8,0.23,0.08,135,35,2,true,15);
  //segmenter.setTrackColor(12,8,0.23,0.08,135,35,3,true,15);
  segmenter.setTrackColor(10,10,0.15,0.20,150,150,0,true,15);
  segmenter.setTrackColor(10,10,0.15,0.20,150,150,1,true,15);
  segmenter.setTrackColor(10,10,0.15,0.20,150,150,2,true,15);
  segmenter.setTrackColor(10,10,0.15,0.20,150,150,3,true,15);
  //segmenter.setTrackColor(13,7,0.17,0.3,156,150,0,true,15);
  //segmenter.setTrackColor(13,7,0.17,0.3,156,150,1,true,15);
  //segmenter.setTrackColor(13,7,0.17,0.3,156,150,2,true,15);
  //segmenter.setTrackColor(13,7,0.17,0.3,156,150,3,true,15);

  // HSV Hard Boundaries (H.upper,H.lower,...,tracker)
  segmenter.setAdaptBound(20,5,.30,.15,170,100,0);
  segmenter.setAdaptBound(20,5,.30,.15,170,100,1);
  segmenter.setAdaptBound(20,5,.30,.15,170,100,2);
  segmenter.setAdaptBound(20,5,.30,.15,170,100,3);


  //segmenter.setAdaptBound(15,5,.30,.25,140,100,0);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,1);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,2);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,0);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,1);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,2);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,3);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,3);
  //segmenter.setAdaptBound(40,5,.40,.10,170,100,0);
  //segmenter.setAdaptBound(40,5,.40,.10,170,100,1);
  //segmenter.setAdaptBound(40,5,.40,.10,170,100,2);
  //segmenter.setAdaptBound(40,5,.40,.10,170,100,3);

  // Inspection box size for each tracker
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,0);
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,1);
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,2);
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,3);

  // RGB color of tracker circle for display
  segmenter.setCircleColor(0,255,0,0);
  segmenter.setCircleColor(0,255,0,1);
  segmenter.setCircleColor(0,255,0,2);
  segmenter.setCircleColor(0,255,0,3);

  // RGB color of tracker bounding box for display
  segmenter.setBoxColor(255,255,0,0);
  segmenter.setBoxColor(255,255,0,1);
  segmenter.setBoxColor(255,255,0,2);
  segmenter.setBoxColor(255,255,0,3);

  // set what type of color adaptation to use and whether to use it
  segmenter.setAdapt(3,true,3,true,3,true,0);
  segmenter.setAdapt(3,true,3,true,3,true,1);
  segmenter.setAdapt(3,true,3,true,3,true,2);
  segmenter.setAdapt(3,true,3,true,3,true,3);

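  // Per-camera geometry used later for stereo triangulation; judging from the
  // debug printout further below, the constructor arguments are presumably
  // (x, y, z, theta, phi, f, r), with the four cameras spread along the x axis.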
  CameraParams* params = (CameraParams*)calloc(4, sizeof(CameraParams));
  CameraParams* tempParams = (CameraParams*)calloc(2, sizeof(CameraParams));
  PixelPoint* points = (PixelPoint*)calloc(4, sizeof(PixelPoint));
  PixelPoint* tempPoints = (PixelPoint*)calloc(2, sizeof(PixelPoint));

  params[0] = CameraParams(15.0, 0.0, 3.0, 90.0, -90.0, 0.465/2.54, 2.5);
  params[1] = CameraParams(5.5, 0.0, 3.0, 90.0, -90.0, 0.465/2.54, 2.5);
  params[2] = CameraParams(-5.5, 0.0, 3.0, 90.0, -90.0, 0.465/2.54, 2.5);
  params[3] = CameraParams(-15.0, 0.0, 3.0, 90.0, -90.0, 0.465/2.54, 2.5);


  points[0] = PixelPoint(0.0, 0.0);
  points[1] = PixelPoint(0.0, 0.0);
  points[2] = PixelPoint(0.0, 0.0);
  points[3] = PixelPoint(0.0, 0.0);
  // iteratively grab video from source and feed it into tracker
  overhead.resize(288,384);
  front.resize(288,288);

  while(1) {
    tim.reset();


    ima[0] = gb1->readRGB();
    ima[1] = gb2->readRGB();
    ima[2] = gb3->readRGB();
    ima[3] = gb4->readRGB();

    uint64 t0 = tim.get();  // to measure display time

    //display[0] = ima[0];
    //display[1] = ima[1];
    //display[2] = ima[2];
    //display[3] = ima[3];

    // call tracker on images
    segmenter.trackImageMulti(&ima,4);

    int modi[4],modj[4];
    // get camera movement parameters from tracker
    for(int i = 0; i < 4; i++)
    {
      if(camPause[i].get() > delay[i])
      {
        // initialize pan/tilt from the camera's current stereo parameters so
        // that the update below is a no-op when no move command fires
        float doPan  = params[i].phi + 180.0F;
        float doTilt = 180.0F - params[i].theta;
        if(segmenter.returnLOT(i) == false)
        {
          segmenter.getImageTrackXY2(&modi[i],&modj[i],i);
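          // map the tracker's (decimated) coordinates into a 640x480
          // reference frame: scale up by 8 and flip the y axis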
          modi[i] = modi[i]*8;
          modj[i] = 480-modj[i]*8;
          // stereo stuff
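          // convert the pixel offset from the image center into physical
          // image-plane coordinates (the constants are sensor-specific)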
          points[i].x = (modi[i]-320)*(4.6/(2.54*659));
          points[i].y = (modj[i]-240)*(3.97/(2.54*494));
          if(modi[i] > 0 && modi[i] < 640 && modj[i] > 0 && modj[i] < 480)
          {
            //std::cout << "doing Camera " << i << "\n";
            if(i == 0)
            {
              delay[i] = camera1->moveCamXYFrame(modi[i],modj[i],i);
              if((delay[i] > 0) || (delay[i] == -5))
              {
                camPause[i].reset();
                doPan = camera1->getCurrentPan();
                doTilt = camera1->getCurrentTilt();
                segmenter.setCameraPosition(doPan,doTilt,i,true);
              }
            }
            if(i == 1)
            {
              delay[i] = camera2->moveCamXYFrame(modi[i],modj[i],i);
              if((delay[i] > 0) || (delay[i] == -5))
              {
                camPause[i].reset();
                doPan = camera2->getCurrentPan();
                doTilt = camera2->getCurrentTilt();
                segmenter.setCameraPosition(doPan,doTilt,i,true);
              }
            }
            if(i == 2)
            {
              delay[i] = camera3->moveCamXYFrame(modi[i],modj[i],i);
              if((delay[i] > 0) || (delay[i] == -5))
              {
                camPause[i].reset();
                doPan = camera3->getCurrentPan();
                doTilt = camera3->getCurrentTilt();
                segmenter.setCameraPosition(doPan,doTilt,i,true);
              }
            }
            if(i == 3)
            {
              delay[i] = camera4->moveCamXYFrame(modi[i],modj[i],i);
              if((delay[i] > 0) || (delay[i] == -5))
              {
                camPause[i].reset();
                doPan = camera4->getCurrentPan();
                doTilt = camera4->getCurrentTilt();
                segmenter.setCameraPosition(doPan,doTilt,i,true);
              }
            }
            // stereo stuff
            params[i].theta = 180.0-doTilt;
            params[i].phi = -180.0+doPan;
          }
        }
        else
        {
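          // loss of track (LOT): if the tracker suggests a new pose, slew the
          // camera directly in pan/tilt instead of tracking in image space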
          if(segmenter.doMoveCamera(i,&doPan,&doTilt) == true)
          {
            //LINFO("MOVING LOT camera %d to %f %f",i,doPan,doTilt);

            if(i == 0)
            {
              delay[i] = camera1->moveCamTPFrame(doPan,doTilt,i);
              if(delay[i] > 0)
              {
                segmenter.setCameraPosition(doPan,doTilt,i,false);
                camPause[i].reset();
              }
            }
            if(i == 1)
            {
              delay[i] = camera2->moveCamTPFrame(doPan,doTilt);
              if(delay[i] > 0)
              {
                segmenter.setCameraPosition(doPan,doTilt,i,false);
                camPause[i].reset();
              }
            }
            if(i == 2)
            {
              delay[i] = camera3->moveCamTPFrame(doPan,doTilt);
              if(delay[i] > 0)
              {
                segmenter.setCameraPosition(doPan,doTilt,i,false);
                camPause[i].reset();
              }
            }
            if(i == 3)
            {
              delay[i] = camera4->moveCamTPFrame(doPan,doTilt);
              if(delay[i] > 0)
              {
                segmenter.setCameraPosition(doPan,doTilt,i,false);
                camPause[i].reset();
              }
            }
          }
        }
      }
    }

    // END

    // draw all our X window displays

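    // rank the cameras by tracking probability: the two most confident ones
    // will be drawn in red on the overhead map, the others in blue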
    int highest = 0;
    int nextHighest = 1;
    int colorSetR[4];
    int colorSetB[4];
    for(int i = 0; i < 4; i++)
    {
      colorSetR[i] = 0;
      colorSetB[i] = 255;

      float p = segmenter.returnCameraProb(i);
      LINFO("CAMERA %d is P %f",i,p);
      bool high = true;
      bool nextHigh = false;
      bool stop = false;
      for(int j = 0; j < 4; j++)
      {
        if(j != i)
        {
          if(p < segmenter.returnCameraProb(j))
          {
            //LINFO("%f is LT %f",p, segmenter.returnCameraProb(j));
            high = false;
            if((nextHigh == false) && (stop == false))
            {
              nextHigh = true;
              stop = true;
            }
            else
            {
              //LINFO("NEXTHIGHEST FALSE");
              nextHigh = false;
            }
          }
        }
      }
      if(high == true)
      {
        highest = i;
      }
      if(nextHigh == true)
      {
        nextHighest = i;
      }
    }
    colorSetR[highest] = 255;
    colorSetB[highest] = 0;
    colorSetR[nextHighest] = 255;
    colorSetB[nextHighest] = 0;

    LINFO("HIGHEST %d NEXT HIGHEST %d",highest,nextHighest);
    //if((segmenter.returnLOT(highest) == false) && (segmenter.returnLOT(nextHighest) == false))
    //{
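      // triangulate a 3-D target position; note that this currently always
      // uses cameras 0 and 1, not the two most confident trackers found above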
      Point3D retPoint = Point3D(0.0, 0.0, 0.0);
      tempParams[0] = params[0];
      tempParams[1] = params[1];
      tempPoints[0] = points[0];
      tempPoints[1] = points[1];
      bool retVal = segmenter.StereoMatch(tempPoints, tempParams, &retPoint);

      //printf("###############################################################\n");

      //        printf("%f %f\n", tempPoints[0].x, tempPoints[0].y);
      //        printf("%f %f %f %f %f %f %f\n", tempParams[0].x, tempParams[0].y, tempParams[0].z,
      //                        tempParams[0].theta, tempParams[0].phi, tempParams[0].f, tempParams[0].r);
      //
      //                printf("%f %f\n", tempPoints[1].x, tempPoints[1].y);
      //        printf("%f %f %f %f %f %f %f\n", tempParams[1].x, tempParams[1].y, tempParams[1].z,
      //                        tempParams[1].theta, tempParams[1].phi, tempParams[1].f, tempParams[1].r);
      if(retVal)
      {
        overhead.resize(400,400,true);
        front.resize(400,400,true);
        drawGrid(overhead, 96,96,1,1,PixRGB<byte>(150,150,150));
        drawGrid(front, 96,96,1,1,PixRGB<byte>(150,150,150));

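        // markers for the four cameras on the overhead map (red = the two
        // most confident trackers, blue = the others), plus a cyan reference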
        drawCircle(overhead, Point2D<int>(140,200)
                   ,2,PixRGB<byte>(colorSetR[0],0,colorSetB[0]),3);
        drawCircle(overhead, Point2D<int>(180,200)
                   ,2,PixRGB<byte>(colorSetR[1],0,colorSetB[1]),3);
        drawCircle(overhead, Point2D<int>(220,200)
                   ,2,PixRGB<byte>(colorSetR[2],0,colorSetB[2]),3);
        drawCircle(overhead, Point2D<int>(260,200)
                   ,2,PixRGB<byte>(colorSetR[3],0,colorSetB[3]),3);

        drawCircle(overhead, Point2D<int>(200,220)
                   ,2,PixRGB<byte>(0,255,255),3);
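        // map the triangulated point into overhead-map pixels
        // (4 pixels per world unit, origin at (200,200))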
        float pointX = 200-(4*retPoint.x);
        float pointY = 200-(4*(-1*retPoint.y));

        //std::cout << "pointX " << pointX << " pointY " << pointY << "\n";
        if(((pointX < 400) && (pointX > 0)) && ((pointY < 400) && (pointY > 0)))
        {
          printf("x=%f y=%f z=%f\n", retPoint.x, retPoint.y, retPoint.z);
          drawCircle(overhead, Point2D<int>((int)pointX,(int)pointY)
                     ,2,PixRGB<byte>(255,0,0),2);
        }
      }
      else
        printf("Not Admissible\n");
      //}
    //printf("###############################################################\n");

    wini1.drawImage(ima[0]);
    wini2.drawImage(ima[1]);
    wini3.drawImage(ima[2]);
    wini4.drawImage(ima[3]);
    roomOver.drawImage(overhead);
    roomFront.drawImage(front);


    t[frame % NAVG] = tim.get();
    t0 = t[frame % NAVG] - t0;
    if (t0 > 28) LINFO("Display took %llums", t0);

    // compute and show framerate over the last NAVG frames:
    if (frame % NAVG == 0 && frame > 0)
    {
      uint64 avg = 0; for (int i = 0; i < NAVG; i ++) avg += t[i];
      float avg2 = 1000.0 / (float)avg * NAVG;
      printf("Framerate: %.1f fps\n", avg2);
    }
    frame ++;
  }

  manager.stop();
  return 0;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */