featureClusterFilters.H

Go to the documentation of this file.
00001 /*!@file VFAT/featureClusterFilters.H Filters for featureClusterVision */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00005 // University of Southern California (USC) and the iLab at USC.         //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: T Nathan Mundhenk <mundhenk@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/VFAT/featureClusterFilters.H $
00035 // $Id: featureClusterFilters.H 14376 2011-01-11 02:44:34Z pez $
00036 //
00037 
00038 #ifndef FEATURECLUSTERFILTERS_H_DEFINED
00039 #define FEATURECLUSTERFILTERS_H_DEFINED
00040 
00041 // ############################################################
00042 // ############################################################
00043 // ##### --- VFAT ---
00044 // ##### Vision Feature Analysis Tool:
00045 // ##### T. Nathan Mundhenk nathan@mundhenk.com
00046 // ##### Laurent Itti itti@pollux.usc.edu
00047 // #####
00048 // ############################################################
00049 // ############################################################
00050 
00051 
00052 #include "Util/Assert.H"
00053 #include "Image/Image.H"
00054 
00055 #include <vector>
00056 
//! this method will apply a filter at a location and return a value
/*! This method is useful if you need to filter an image in a
  non-linear fashion or only need to extract filtered patches.
  Other than that, you should use the whole-image filter methods.
  @param image the image to sample from
  @param kernel the convolution kernel (must have odd width and height)
  @param location the pixel at which the kernel is centered
*/
template <class T_or_RGB> inline
T_or_RGB filterAtLocation(Image<T_or_RGB> *image,
                      Image<float> *kernel, Point2D<int> *location);

//! this method will apply a filter at a batch of locations
/*! This method is useful if you need to filter an image in a
  non-linear fashion or only need to extract filtered patches.
  Other than that, you should use the whole-image filter methods.
  This method differs from filterAtLocation in that it accepts
  a batch of locations.
  @param image the image to sample from
  @param kernel the convolution kernel (must have odd width and height)
  @param location the list of pixels at which the kernel is centered
  @param results one output value per location (must be pre-sized to
         at least 'size' elements)
  @param size how many locations from the list to process
*/
template <class T_or_RGB> inline
void filterAtLocationBatch(const Image<T_or_RGB> *image,
                           const Image<float> *kernel,
                           const std::vector<Point2D<int>*> *location,
                           std::vector<T_or_RGB> *results,
                           const unsigned int size);

//! This method applies filters on an image at multiple scales at select locations
/*! When called this method will run each of the supplied filters on the
  image at scales ranging from full scale down to size/2^n. The results
  align per (scale, filter) pair in the output vector, in the order the
  filters are supplied. The method for each filter is basic convolution,
  but it is limited to each location rather than the whole image.
  @param image this is the input raw image
  @param location This is a list of the locations at which to apply filters
  @param locations This is how many locations are in the list
  @param filters these are several kernels for filter use
  @param scales how many scales down do you want to filter each image
  @param quarterSize scale image by sqrt(.5) (quarter-ish area) or 1/2
  @param normalize normalize over the filter outputs
  @param results what you get back from all this; must be pre-sized by
         the caller to scales * filters->size() inner vectors
*/
template <class T_or_RGB> inline
void multiScaleBatchFilter(const Image<T_or_RGB> *image,
                           const std::vector<Point2D<int>*> *location,
                           const unsigned int *locations,
                           const std::vector<Image<float> > *filters,
                           const unsigned int scales,
                           const bool quarterSize,
                           const bool normalize,
                           std::vector<std::vector<T_or_RGB> > *results);

//! this will find junctions after calling multiScaleBatchFilter
/*! This is optional and may be called to find junctions if
  multiScaleBatchFilter was called using gabor filters at
  0, 45, 90 and 135 degrees. Call this after calling
  multiScaleBatchFilter if wanted.
*/
template <class T> inline
void multiScaleJunctionFilter(const unsigned int size,
                              const unsigned int scales,
                              const std::vector<std::vector<PixH2SV2<T> > >
                              *input,
                              std::vector<std::vector<PixH2SV2<T> > > *results);

//! call this optionally after multiScaleBatchFilter
/*! Scalar-pixel overload of the junction filter above; expects the
  same 0/45/90/135-degree gabor layout in 'input'.
*/
template <class T> inline
void multiScaleJunctionFilter(const unsigned int size,
                              const unsigned int scales,
                              const std::vector<std::vector<T> >
                              *input,
                              std::vector<std::vector<T> > *results);
00128 
00129 // ######################################################################
00130 // ######################################################################
00131 // ##### Inline function definitions
00132 // ######################################################################
00133 // ######################################################################
00134 
00135 // ######################################################################
00136 template <class T_or_RGB> inline
00137 T_or_RGB filterAtLocation(Image<T_or_RGB> *image,
00138                           Image<float> *kernel, Point2D<int> *location)
00139 {
00140   ASSERT((kernel->getWidth() % 2 == 1) && (kernel->getHeight() % 2 == 1));
00141   int w        = image->getWidth();
00142   int h        = image->getHeight();
00143   int kw       = kernel->getWidth();
00144   int kh       = kernel->getHeight();
00145   int kx       = (kw-1)/2;
00146   int ky       = (kh-1)/2;
00147   int kstartx  = 0;
00148   int kstarty  = 0;
00149 
00150   // do some extra work to account for overlapping on edge
00151   // e.g. bounds checking
00152 
00153   int startx = location->i - kx;
00154   if(startx < 0){ kstartx = abs(startx); startx = 0;}
00155   int endx   = location->i + kx;
00156   if(endx >= w) { endx = w - 1; }
00157 
00158   int starty = location->j - ky;
00159   if(starty < 0){ kstarty = abs(starty); starty = 0;}
00160   int endy   = location->j + ky;
00161   if(endy >= h) { endy = h - 1; }
00162 
00163   T_or_RGB result = T_or_RGB(0);
00164   T_or_RGB zero   = T_or_RGB(0);
00165   float sum = 0;
00166   //LINFO("C");
00167   //LINFO("%d %d %d %d",startx,endx,starty,endy);
00168   //LINFO("%d %d",kstartx,kstarty);
00169   typename Image<float>::iterator    ker;
00170   typename Image<T_or_RGB>::iterator img;
00171   for(int iy = starty; iy <= endy; iy++)
00172   {
00173     // start at kernel offset for any edges
00174     ker = kernel->beginw() +
00175       (kw*(kstarty+iy-starty)) + kstartx;
00176     //LINFO("%d", (kw*(kstarty+iy-starty))+ kstartx);
00177     //LINFO("%d", w*iy + startx);
00178     //LINFO("%d", w*iy + endx);
00179     // start in image offset for any edges
00180     for(img = image->beginw() + w*iy + startx;
00181         img != image->beginw() + w*iy + endx; ++img, ++ker)
00182     {
00183       result += clamped_convert<T_or_RGB>((*img) * (*ker));
00184       //LINFO("C2");
00185       sum += *ker;
00186       //LINFO("C3");
00187     }
00188   }
00189   // normalize results
00190   if(sum != 0)
00191   {
00192     result = clamped_convert<T_or_RGB>(result/sum);
00193   }
00194   else
00195   {
00196     result = zero;
00197   }
00198   return result;
00199 }
00200 
00201 // ######################################################################
00202 template <class T_or_RGB> inline
00203 void filterAtLocationBatch(const Image<T_or_RGB> *image,
00204                            const Image<float> *kernel,
00205                            const std::vector<Point2D<int>*> *location,
00206                            std::vector<T_or_RGB> *results,
00207                            const unsigned int size)
00208 {
00209   ASSERT((kernel->getWidth() % 2 == 1) && (kernel->getHeight() % 2 == 1));
00210   ASSERT(results->size() >= size);
00211   int w        = image->getWidth();
00212   int h        = image->getHeight();
00213   int kw       = kernel->getWidth();
00214   int kh       = kernel->getHeight();
00215   int kx       = (kw-1)/2;
00216   int ky       = (kh-1)/2;
00217   int kstartx  = 0;
00218   int kstarty  = 0;
00219 
00220   typename std::vector<T_or_RGB>::iterator resultsItr = results->begin();
00221 
00222   std::vector<Point2D<int>*>::const_iterator locationItr = location->begin();
00223 
00224   // do some extra work to account for overlapping on edge
00225   // e.g. bounds checking
00226 
00227   int startx,endx,starty,endy;
00228 
00229   typename Image<float>::const_iterator ker;
00230   typename Image<T_or_RGB>::const_iterator img;
00231   bool useSlow;
00232   T_or_RGB zero = T_or_RGB(0);
00233 
00234   for(unsigned int i = 0; i < size; i++, ++resultsItr, ++locationItr)
00235   {
00236     useSlow = false;
00237     startx = (*locationItr)->i - kx;
00238     if(startx < 0){ kstartx = abs(startx); startx = 0; useSlow = true; }
00239     endx   = (*locationItr)->i + kx;
00240     if(endx >= w) { endx = w - 1; useSlow = true; }
00241 
00242     starty = (*locationItr)->j - ky;
00243     if(starty < 0){ kstarty = abs(starty); starty = 0; useSlow = true; }
00244     endy   = (*locationItr)->j + ky;
00245     if(endy >= h) { endy = h - 1; useSlow = true; }
00246 
00247     float sum = 0;
00248     // use slow if the kernal runs off the image
00249     // otherwise use a slightly faster iterator incr.
00250     *resultsItr = zero;
00251 
00252     if(useSlow == true)
00253     {
00254       //LINFO("SLOW");
00255       for(int iy = starty; iy <= endy; iy++)
00256       {
00257         // start at kernel offset for any edges
00258         ker = kernel->begin() +
00259           (kw*(kstarty+iy-starty)) + kstartx;
00260         // start in image offset for any edges
00261         for(img = image->begin() + w*iy + startx;
00262             img != image->begin() + w*iy + endx; ++img, ++ker)
00263         {
00264           *resultsItr += clamped_convert<T_or_RGB>((*img) * (*ker));
00265           sum += *ker;
00266         }
00267       }
00268       // normalize results
00269       if(sum != 0)
00270       {
00271         *resultsItr = clamped_convert<T_or_RGB>((*resultsItr)/sum);
00272       }
00273       else
00274       {
00275         *resultsItr = zero;
00276       }
00277     }
00278     else
00279     {
00280       //LINFO("FAST");
00281       ker = kernel->begin();
00282       for(int iy = starty; iy <= endy; iy++)
00283       {
00284         for(img = image->begin() + w*iy + startx;
00285             img != image->begin() + w*iy + endx; ++img, ++ker)
00286         {
00287           *resultsItr += clamped_convert<T_or_RGB>((*img) * (*ker));
00288           sum += *ker;
00289         }
00290       }
00291       if(sum != 0)
00292       {
00293         *resultsItr = clamped_convert<T_or_RGB>((*resultsItr)/sum);
00294       }
00295       else
00296       {
00297         *resultsItr = zero;
00298       }
00299     }
00300   }
00301 }
00302 
00303 // ######################################################################
template <class T_or_RGB> inline
void multiScaleBatchFilter(const Image<T_or_RGB> *image,
                           const std::vector<Point2D<int>*> *location,
                           const unsigned int *locations,
                           const std::vector<Image<float> > *filters,
                           const unsigned int scales,
                           const bool quarterSize,
                           const bool normalize,
                           std::vector<std::vector<T_or_RGB> > *results)
{
  // Runs every kernel in 'filters' at every listed location, at each of
  // 'scales' pyramid levels, writing one output vector per
  // (scale, filter) pair through 'iresults' in scale-major order.
  // NOTE(review): nothing here resizes 'results' -- the caller must
  // pre-size it to scales*filters->size() inner vectors of at least
  // *locations elements; also the inner loop ACCUMULATES with '+=', so
  // 'results' should be zero-initialized by the caller -- confirm.

  T_or_RGB zero = T_or_RGB(0);
  //PixH2SV<T_or_RGB> zero = PixH2SV<T_or_RGB>(0);

  // this is how much to reduce each image by for the image pyramid;
  // sqrt(.5) on each side halves the pixel count (area), hence the
  // "quarter-ish" naming in the interface
  const double rooty = sqrt(.5);

  // An operable copy of the image locations, one set per pyramid
  std::vector<Point2D<int> > currentLocation1;
  std::vector<Point2D<int> > currentLocation2;
  std::vector<Point2D<int> > *currentLocation;

  // A working copy of the current image, one per pyramid
  Image<T_or_RGB> currentImage1 = *image;
  Image<T_or_RGB> currentImage2;
  Image<T_or_RGB> *currentImage;

  // Copy all pertinent locations over
  //LINFO("COPY LOCATIONS");

  std::vector<Point2D<int>*>::const_iterator ilocation = location->begin();

  for(unsigned int i = 0; i < *locations; i++, ++ilocation)
    {
      currentLocation1.push_back(**ilocation);
    }
  if(quarterSize)
    {
      // Build a second image scaled by sqrt(.5) with matching scaled
      // locations; the scale loop below alternates between the two
      // pyramids, giving finer (area-halving) scale steps.
      Point2D<int> pp;
      currentImage2 = rescaleBilinear(currentImage1,
                              (int)floor(currentImage1.getWidth()*rooty),
                              (int)floor(currentImage1.getHeight()*rooty));
      currentLocation2.resize(currentLocation1.size(),pp);
      std::vector<Point2D<int> >::iterator icur1 =  currentLocation1.begin();
      std::vector<Point2D<int> >::iterator icur2 =  currentLocation2.begin();
      while(icur1 != currentLocation1.end())
        {
          icur2->i = (int)floor(icur1->i * rooty);
          icur2->j = (int)floor(icur1->j * rooty);
          ++icur1; ++icur2;
        }
    }

  int startx,starty;
  unsigned int endx,endy;

  Image<float>::const_iterator ker;
  typename Image<T_or_RGB>::iterator img;

  typename std::vector<std::vector<T_or_RGB> >::iterator iresults =
    results->begin();

  //LINFO("START");
  // iterate over each scale
  for(unsigned int sc = 0; sc < scales; sc++)
    {
      // even scales use the full-resolution pyramid, odd scales the
      // sqrt(.5) pyramid; the "(sc == 0)" clause is redundant with
      // "sc%2 == 0" but kept as written
      if((!quarterSize) || (sc%2 == 0) || (sc == 0))
        {
          currentLocation = &currentLocation1;
          currentImage    = &currentImage1;
        }
      else
        {
          currentLocation = &currentLocation2;
          currentImage    = &currentImage2;
        }

      const unsigned int w        = currentImage->getWidth();
      const unsigned int h        = currentImage->getHeight();
      unsigned int kstartx        = 0;
      unsigned int kstarty        = 0;

      for(std::vector<Image<float> >::const_iterator
            ifilters = filters->begin();
          ifilters != filters->end(); ++ifilters, ++iresults)
        {
          // iterate over each location

          const unsigned int kw       = ifilters->getWidth();
          const unsigned int kh       = ifilters->getHeight();
          const unsigned int kx       = (kw-1)/2;
          const unsigned int ky       = (kh-1)/2;

          typename std::vector<T_or_RGB>::iterator iiresults =
            iresults->begin();
          std::vector<Point2D<int> >::iterator icurrentLocation
            = currentLocation->begin();
          for(unsigned int l = 0; l < *locations; l++,
                ++icurrentLocation, ++iiresults)
            {
              // Clip the kernel window against the image borders; when
              // clipped at the top/left, kstartx/kstarty shift the
              // kernel read offset so kernel and image stay aligned.
              // NOTE(review): 'kx'/'ky' are unsigned, so 'i - kx' is
              // computed in unsigned arithmetic and relies on the
              // int conversion to recover a negative value -- works on
              // common two's-complement targets, but confirm.
              startx = icurrentLocation->i - kx;
              if(startx < 0)
                { kstartx = abs(startx); startx = 0; }
              endx   = icurrentLocation->i + kx;
              if(endx >= w) { endx = w - 1; }

              starty = icurrentLocation->j - ky;
              if(starty < 0)
                { kstarty = abs(starty); starty = 0; }
              endy   = icurrentLocation->j + ky;
              if(endy >= h) { endy = h - 1; }

              float sum = 0;
              // convolve the clipped window, re-seeking the kernel
              // iterator at the proper offset on every row

              for(unsigned int iy = starty; iy <= endy; iy++)
                {
                  // start at kernel offset for any edges
                  ker = ifilters->begin() +
                    (kw*(kstarty+iy-starty)) + kstartx;
                  // start in image offset for any edges
                  // NOTE(review): the inner loop stops AT 'endx' while
                  // the row loop is inclusive, so the right-most window
                  // column is never accumulated -- looks like an
                  // off-by-one; confirm intended behavior before fixing.
                  for(img  = currentImage->beginw() + w*iy + startx;
                      img != currentImage->beginw() + w*iy + endx; ++img, ++ker)
                    {
                      *iiresults += clamped_convert<T_or_RGB>((*img) * (*ker));
                      sum += fabs(*ker);
                    }
                }
              // normalize results by the absolute kernel weight applied
              // (keeps edge-clipped responses comparable to interior ones)
              if(normalize)
              {
                if(sum != 0)
                  {
                    *iiresults = clamped_convert<T_or_RGB>((*iiresults)/sum);
                  }
                else
                  {
                    *iiresults = zero;
                  }
              }
              // reset the kernel offsets for the next location
              kstarty = 0;
              kstartx = 0;
            }
        }

      // reduce the image using decimation and
      // adjust the coordinate selection accordingly; only the pyramid
      // used at this scale is halved, so the two pyramids interleave
      *currentImage    = decXY(*currentImage);
      // note rooty = sqrt(.5);

      for(std::vector<Point2D<int> >::iterator icurrentLocation
            = currentLocation->begin();
          icurrentLocation != currentLocation->end();
          ++icurrentLocation)
        {
          *icurrentLocation = *icurrentLocation/2;
        }
    }
}
00465 
00466 // ######################################################################
template <class T> inline
void multiScaleJunctionFilter(const unsigned int size,
                              const unsigned int scales,
                              const std::vector<std::vector<PixH2SV2<T> > >
                              *input,
                              std::vector<std::vector<PixH2SV2<T> > > *results)
{
  // Combines four orientation-channel response vectors per scale (the
  // 0/45/90/135-degree gabor layout produced by multiScaleBatchFilter;
  // scale s owns inner vectors [4s .. 4s+3]) into one per-location
  // junction-strength vector per scale.
  // NOTE(review): nothing here checks or resizes -- 'input' must hold
  // at least 4*scales inner vectors of 'size' elements and 'results'
  // at least 'scales' vectors of 'size' elements; confirm at call site.
  const PixH2SV2<T> zero(T(0));

  // iterators over the four orientation channels of the current scale;
  // the "1"/"2" pairs are the two orthogonal orientation pairs
  typename std::vector<std::vector<PixH2SV2<T> > >::const_iterator
    iinput1a = input->begin();
  typename std::vector<std::vector<PixH2SV2<T> > >::const_iterator
    iinput2a = input->begin()+1;
  typename std::vector<std::vector<PixH2SV2<T> > >::const_iterator
    iinput1b = input->begin()+2;
  typename std::vector<std::vector<PixH2SV2<T> > >::const_iterator
    iinput2b = input->begin()+3;
  typename std::vector<std::vector<PixH2SV2<T> > >::iterator
    iresults = results->begin();

  for(unsigned int s = 0; s < scales; s++)
    {
      typename std::vector<PixH2SV2<T> >::const_iterator
        iiinput1a = iinput1a->begin();
      typename std::vector<PixH2SV2<T> >::const_iterator
        iiinput2a = iinput2a->begin();
      typename std::vector<PixH2SV2<T> >::const_iterator
        iiinput1b = iinput1b->begin();
      typename std::vector<PixH2SV2<T> >::const_iterator
        iiinput2b = iinput2b->begin();
      typename std::vector<PixH2SV2<T> >::iterator iiresults = iresults->begin();

      // peak (component-wise max) response of each channel over all
      // locations, used below as the normalization term
      PixH2SV2<T> max1a = PixH2SV2<T>(0); PixH2SV2<T> max2a = PixH2SV2<T>(0);
      PixH2SV2<T> max1b = PixH2SV2<T>(0); PixH2SV2<T> max2b = PixH2SV2<T>(0);

      for(unsigned int i = 0; i < size; i++,++iiinput1a)
        {
          max1a = maxmerge(*iiinput1a,max1a);
        }
      for(unsigned int i = 0; i < size; i++,++iiinput2a)
        {
          max2a = maxmerge(*iiinput2a,max2a);
        }
      for(unsigned int i = 0; i < size; i++,++iiinput1b)
        {
          max1b = maxmerge(*iiinput1b,max1b);
        }
      for(unsigned int i = 0; i < size; i++,++iiinput2b)
        {
          max2b = maxmerge(*iiinput2b,max2b);
        }

      // rewind the channel iterators for the combination pass
      iiinput1a = iinput1a->begin();
      iiinput2a = iinput2a->begin();
      iiinput1b = iinput1b->begin();
      iiinput2b = iinput2b->begin();

      // per-pair normalizers: sum of the two opposing channel peaks
      const PixH2SV2<T> norm1( max1a + max1b );
      const PixH2SV2<T> norm2( max2a + max2b );


      if((norm1 != zero) && (norm2 != zero))
        {
          for(unsigned int i = 0; i < size; i++,
                ++iiinput1a,++iiinput2a,
                ++iiinput1b,++iiinput2b,
                ++iiresults)
            {
              // normalized response difference within each orthogonal pair
              const PixH2SV2<T> c1mix( abs((*iiinput1a)-(*iiinput1b))/(norm1) );
              const PixH2SV2<T> c2mix( abs((*iiinput2a)-(*iiinput2b))/(norm2) );
              // compute crossyness at location (strength of junction)
              const PixH2SV2<T> c1Gmix( ((*iiinput1a)+(*iiinput1b))/(norm1) );
              const PixH2SV2<T> c2Gmix( ((*iiinput2a)+(*iiinput2b))/(norm2) );
              // Indicates general intensity of the output
              const PixH2SV2<T> iOutA( (c1Gmix+c2Gmix)/2 );
              // Finish computing crossyness by subtracting lineyness
              // indicates the crispness of lines
              const PixH2SV2<T> iOutB( abs(c1mix-c2mix)/2 );
              // indicates the crispness of junctions
              *iiresults            = PixH2SV2<T>(iOutA - iOutB);
            }
        }
      else
        {
          // degenerate scale: some pair had no response at all, so
          // emit zeros rather than dividing by zero
          // NOTE(review): 's' is unsigned but formatted with %d --
          // harmless for small values, but %u would be correct
          LINFO("WARNING - AT %d ALL VALUES ARE ZERO",s);
          for(unsigned int i = 0; i < size; i++,++iiresults)
            {
              *iiresults = zero;
            }
        }
      // advance to the next scale's block of four channels
      iinput1a = iinput1a + 4;
      iinput2a = iinput2a + 4;
      iinput1b = iinput1b + 4;
      iinput2b = iinput2b + 4;
      ++iresults;
    }
}
00565 
00566 // ######################################################################
00567 template <class T> inline
00568 void multiScaleJunctionFilter(const unsigned int size,
00569                               const unsigned int scales,
00570                               const std::vector<std::vector<T> >
00571                               *input,
00572                               std::vector<std::vector<T> > *results)
00573 {
00574   const T zero = T(0);
00575 
00576   typename std::vector<std::vector<T> >::const_iterator
00577     iinput1a = input->begin();
00578   typename std::vector<std::vector<T> >::const_iterator
00579     iinput2a = input->begin()+1;
00580   typename std::vector<std::vector<T> >::const_iterator
00581     iinput1b = input->begin()+2;
00582   typename std::vector<std::vector<T> >::const_iterator
00583     iinput2b = input->begin()+3;
00584   typename std::vector<std::vector<T> >::iterator
00585     iresults = results->begin();
00586 
00587   for(unsigned int s = 0; s < scales; s++)
00588     {
00589       typename std::vector<T>::const_iterator
00590         iiinput1a = iinput1a->begin();
00591       typename std::vector<T>::const_iterator
00592         iiinput2a = iinput2a->begin();
00593       typename std::vector<T>::const_iterator
00594         iiinput1b = iinput1b->begin();
00595       typename std::vector<T>::const_iterator
00596         iiinput2b = iinput2b->begin();
00597       typename std::vector<T>::iterator iiresults = iresults->begin();
00598 
00599       T max1a = T(0); T max2a = T(0);
00600       T max1b = T(0); T max2b = T(0);
00601 
00602       for(unsigned int i = 0; i < size; i++,++iiinput1a)
00603         {
00604           max1a = maxmerge(*iiinput1a,max1a);
00605         }
00606       for(unsigned int i = 0; i < size; i++,++iiinput2a)
00607         {
00608           max2a = maxmerge(*iiinput2a,max2a);
00609         }
00610       for(unsigned int i = 0; i < size; i++,++iiinput1b)
00611         {
00612           max1b = maxmerge(*iiinput1b,max1b);
00613         }
00614       for(unsigned int i = 0; i < size; i++,++iiinput2b)
00615         {
00616           max2b = maxmerge(*iiinput2b,max2b);
00617         }
00618 
00619       iiinput1a = iinput1a->begin();
00620       iiinput2a = iinput2a->begin();
00621       iiinput1b = iinput1b->begin();
00622       iiinput2b = iinput2b->begin();
00623 
00624       const T norm1 = max1a + max1b;
00625       const T norm2 = max2a + max2b;
00626 
00627 
00628       if((norm1 != 0) && (norm2 != 0))
00629         {
00630           for(unsigned int i = 0; i < size; i++,
00631                 ++iiinput1a,++iiinput2a,
00632                 ++iiinput1b,++iiinput2b,
00633                 ++iiresults)
00634             {
00635               const T c1mix = T(fabs((*iiinput1a)-(*iiinput1b))/(norm1));
00636               const T c2mix = T(fabs((*iiinput2a)-(*iiinput2b))/(norm2));
00637               // compute crossyness at location (strength of junction)
00638               const T c1Gmix = (((*iiinput1a)+(*iiinput1b))/(norm1));
00639               const T c2Gmix = (((*iiinput2a)+(*iiinput2b))/(norm2));
00640               // Indicates general intensity of the output
00641               const T iOutA  = (c1Gmix+c2Gmix)/2;
00642               // Finish computing crossyness by subtracting lineyness
00643               // indicates the crispness of lines
00644               const T iOutB  = T(fabs(c1mix-c2mix)/2);
00645               // indicates the crispness of junctions
00646               *iiresults            = iOutA - iOutB;
00647               // Include the explicit instantiations
00648             }
00649         }
00650       else
00651         {
00652           LINFO("WARNING - AT %d ALL VALUES ARE ZERO",s);
00653           for(unsigned int i = 0; i < size; i++,++iiresults)
00654             {
00655               *iiresults = zero;
00656             }
00657         }
00658       iinput1a = iinput1a + 4;
00659       iinput2a = iinput2a + 4;
00660       iinput1b = iinput1b + 4;
00661       iinput2b = iinput2b + 4;
00662       ++iresults;
00663     }
00664 }
00665 
00666 #endif // !FEATURECLUSTERFILTERS_H_DEFINED
00667 
00668 // ######################################################################
00669 /* So things look consistent in everyone's emacs... */
00670 /* Local Variables: */
00671 /* indent-tabs-mode: nil */
00672 /* End: */
Generated on Sun May 8 08:07:02 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3