FilterOps.H

Go to the documentation of this file.
00001 /*!@file Image/FilterOps.H Filtering operations on Image
00002  */
00003 
00004 // //////////////////////////////////////////////////////////////////// //
00005 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00006 // University of Southern California (USC) and the iLab at USC.         //
00007 // See http://iLab.usc.edu for information about this project.          //
00008 // //////////////////////////////////////////////////////////////////// //
00009 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00010 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00011 // in Visual Environments, and Applications'' by Christof Koch and      //
00012 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00013 // pending; application number 09/912,225 filed July 23, 2001; see      //
00014 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00015 // //////////////////////////////////////////////////////////////////// //
00016 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00017 //                                                                      //
00018 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00019 // redistribute it and/or modify it under the terms of the GNU General  //
00020 // Public License as published by the Free Software Foundation; either  //
00021 // version 2 of the License, or (at your option) any later version.     //
00022 //                                                                      //
00023 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00024 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00025 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00026 // PURPOSE.  See the GNU General Public License for more details.       //
00027 //                                                                      //
00028 // You should have received a copy of the GNU General Public License    //
00029 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00030 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00031 // Boston, MA 02111-1307 USA.                                           //
00032 // //////////////////////////////////////////////////////////////////// //
00033 //
00034 // Primary maintainer for this file: Rob Peters <rjpeters@klab.caltech.edu>
00035 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Image/FilterOps.H $
00036 // $Id: FilterOps.H 13801 2010-08-19 19:54:49Z lior $
00037 //
00038 
00039 
00040 #ifndef IMAGE_FILTEROPS_H_DEFINED
00041 #define IMAGE_FILTEROPS_H_DEFINED
00042 
00043 #include "Util/Promotions.H"
00044 
00045 // NOTE: Many of the functions that were previously in this file have
00046 // been split into separate source files, for better logical
00047 // organization and to reduce recompile times. The basic filtering
00048 // functions, including all of the "convolve" variants as well as the
00049 // separable-filter functions are now found in Image/Convolutions.H.
00050 // The lowpass filtering functions, both the optimized 3-, 5-, and
00051 // 9-point versions as well as the generic versions, are now found in
00052 // Image/LowPass.H. Binary morphology operations
00053 // (dilate/erode/open/close) are now found in Image/MorphOps.H.
00054 
00055 // Include these for backward compatibility so that Image/FilterOps.H
00056 // still brings in all the same declarations that it did previously:
00057 #include "Image/Convolutions.H"
00058 #include "Image/LowPass.H"
00059 
00060 class Dims;
00061 template <class T> class Point2D;
00062 template <class T> class Image;
00063 
00064 //! template matching correlation
00065 template <class T>
00066   Image<typename promote_trait<T, float>::TP>
00067 correlation(const Image<T>& src, const Image<float>& filter);
00068 
00069 //! template matching using OpenCV; defaults to correlation if OpenCV is not installed
00070 //! defaults to CV_TM_SQDIFF
00071 template <class T>
00072   Image<typename promote_trait<T, float>::TP>
00073 templMatch(const Image<T>& src, const Image<float>& filter, int method = 0);
00074 
00075 //! Spatial pooling taking the max value over a neighborhood
00076 /*! Compute max over a rectangle of size (si, sj) starting at positions
00077   that increment in steps (sti, stj) */
00078 template <class T>
00079 Image<typename promote_trait<T, float>::TP>
00080 spatialPoolMax(const Image<T>& src, const int si, const int sj,
00081                const int sti, const int stj);
00082 
00083 //! Feature/spatial pooling for the S2 layer of Hmax
00084 template <class T>
00085 float featurePoolHmax(const Image<T>& img1, const Image<T>& img2,
00086                       const Image<T>& img3, const Image<T>& img4,
00087                       const int si, const int sj, const float s2t);
00088 
00089 
00090 //! Oriented filter along theta (deg), spatial freq k
00091 /*! This filter works on the difference between the image and itself
00092     low-passed. It modulates this difference by a complex sinusoid,
00093     and then low-passes the real and imaginary parts, and finally
00094     takes the modulus. This method is by Greenspan et al. (1994).
00095     CAUTION: *you* must provide as input (i.e., in "this") the
00096     difference between an image and itself low-pass filtered.
00097     CAUTION: results are possibly bad with the integer versions. Use
00098     float.
00099 
00100     @param usetab Whether to use trig lookup tables internally to get
00101     sin and cos values. This may give up to a 2x speedup, at the cost
00102     of some minor loss of precision.
00103 */
00104 template <class T_or_RGB>
00105 Image<typename promote_trait<T_or_RGB, float>::TP>
00106 orientedFilter(const Image<T_or_RGB>& src, const float k, const float theta,
00107                const float intensity = 1.0, const bool usetab = false);
00108 
00109 //! Compute center-surround difference, taking the abs if absol==true
00110 /*! To the (hires) center will be subtracted a (lowres)
00111   surround. Image sizes must be int multiples.  CAUTION: values will
00112   be clamped to the range of your template type.
00113   @param absol if true, take the absolute value of the difference (otherwise,
00114   negative values are clamped to zero). */
00115 template <class T>
00116 Image<T> centerSurround(const Image<T>& center,
00117                         const Image<T>& surround,
00118                         const bool absol = false);
00119 
00120 //! Compute center-surround difference, without clamping or rectifying
00121 /*! To the (hires) center will be subtracted a (lowres)
00122   surround. Image sizes must be int multiples. This is implemented in
00123   a different function from centerSurround() as it returns two images,
00124   for the positive and negative values of the difference */
00125 template <class T>
00126 void centerSurround(const Image<T>& center,
00127                     const Image<T>& surround,
00128                     Image<T>& pos, Image<T>& neg);
00129 
00130 //! compute double-opponent response
00131 /*! (cplus - cminus) [-] (splus - sminus) where [-] is subtraction of two
00132   images of possibly different sizes, followed by absolute value. The result
00133   will have the size of the larger (center) image. */
00134 template <class T>
00135 Image<typename promote_trait<T, float>::TP>
00136 doubleOpp(const Image<T>& cplus, const Image<T>& cminus,
00137           const Image<T>& splus, const Image<T>& sminus);
00138 
00139 //! Compute average orientation and strength using steerable filters
00140 template <class T>
00141 void avgOrient(const Image<T>& src,
00142                Image<float>& orient, Image<float>& strength);
00143 
00144 //! Divide image by the local image energy, then subtract overall mean.
00145 template <class T>
00146 Image<T> energyNorm(const Image<T>& img);
00147 
00148 //! Compute response of a junction detector filter
00149 /*! In the full implementation here, the junction filter responds only
00150 if the relevant features are present and the irrelevant features are
00151 absent.
00152   @param i0 image filtered by a 0deg (horizontal) Gabor filter
00153   @param i45 image filtered by a 45deg Gabor filter
00154   @param i90 image filtered by a 90deg (vertical) Gabor filter
00155   @param i135 image filtered by a 135deg Gabor filter
00156   @param r boolean array of which features are considered relevant to
00157   the junction. The first element is for the horizontal (i0) feature
00158   at (x+dx, y), the second for the 45deg (i45) feature at (x+dx,
00159   y-dy), and so on, going counterclockwise:
00160 
00161   :    3   2   1
00162   :      \ | /
00163   :   4 -- o -- 0
00164   :      / | \
00165   :    5   6   7
00166 
00167   @param dx horizontal distance from current pixel at which the
00168   presence or absence of a given feature should be checked for.
00169   @param dy vertical distance from current pixel at which the
00170   presence or absence of a given feature should be checked for. */
00171 template <class T>
00172 Image<T> junctionFilterFull(const Image<T>& i0,  const Image<T>& i45,
00173                             const Image<T>& i90, const Image<T>& i135,
00174                             const bool r[8],     const int dx = 6,
00175                             const int dy = 6,
00176                             const bool useEuclidDiag = true);
00177 
00178 //! Compute response of a junction detector filter, partial implementation
00179 /*! In the partial implementation here, the junction filter responds
00180   when the relevant features are present, without consideration of
00181   what the values of the irrelevant features might be.
00182   @param i0 image filtered by a 0deg (horizontal) Gabor filter
00183   @param i45 image filtered by a 45deg Gabor filter
00184   @param i90 image filtered by a 90deg (vertical) Gabor filter
00185   @param i135 image filtered by a 135deg Gabor filter
00186   @param r boolean array of which features are considered relevant to
00187   the junction. The first element is for the horizontal (i0) feature
00188   at (x+dx, y), the second for the 45deg (i45) feature at (x+dx,
00189   y-dy), and so on, going counterclockwise:
00190 
00191   :    3   2   1
00192   :      \ | /
00193   :   4 -- o -- 0
00194   :      / | \
00195   :    5   6   7
00196 
00197   @param dx horizontal distance from current pixel at which the
00198   presence or absence of a given feature should be checked for.
00199   @param dy vertical distance from current pixel at which the
00200   presence or absence of a given feature should be checked for. */
00201 template <class T>
00202 Image<T> junctionFilterPartial(const Image<T>& i0, const Image<T>& i45,
00203                                const Image<T>& i90, const Image<T>& i135,
00204                                const bool r[8], const int dx = 6,
00205                                const int dy = 6,
00206                                const bool useEuclidDiag = false);
00207 
00208 
00209 //! Compute response of a MST detector filter
00210 /*! In the full implementation here, the MST filter responds only
00211 if the relevant features are present and the irrelevant features are
00212 absent.
00213   @param i0 image filtered by a 0deg (horizontal) Gabor filter
00214   @param i45 image filtered by a 45deg Gabor filter
00215   @param i90 image filtered by a 90deg (vertical) Gabor filter
00216   @param i135 image filtered by a 135deg Gabor filter
00217   @param r boolean array of which features are considered relevant to
00218   the MST. The first element is for the horizontal (i0) feature
00219   at (x+dx, y), the second for the 45deg (i45) feature at (x+dx,
00220   y-dy), and so on, going counterclockwise:
00221 
00222   :    3   2   1
00223   :      \ | /
00224   :   4 -- o -- 0
00225   :      / | \
00226   :    5   6   7
00227 
00228   @param dx horizontal distance from current pixel at which the
00229   presence or absence of a given feature should be checked for.
00230   @param dy vertical distance from current pixel at which the
00231   presence or absence of a given feature should be checked for. */
00232 template <class T>
00233 Image<T> MSTFilterFull(const Image<T>& i0,  const Image<T>& i45,
00234                             const Image<T>& i90, const Image<T>& i135,
00235                             const bool r[8],     const int dx = 6,
00236                             const int dy = 6,
00237                             const bool useEuclidDiag = true);
00238 
00239 //! Compute response of a MST detector filter, partial implementation
00240 /*! In the partial implementation here, the MST filter responds
00241   when the relevant features are present, without consideration of
00242   what the values of the irrelevant features might be.
00243   @param i0 image filtered by a 0deg (horizontal) Gabor filter
00244   @param i45 image filtered by a 45deg Gabor filter
00245   @param i90 image filtered by a 90deg (vertical) Gabor filter
00246   @param i135 image filtered by a 135deg Gabor filter
00247   @param r boolean array of which features are considered relevant to
00248   the MST. The first element is for the horizontal (i0) feature
00249   at (x+dx, y), the second for the 45deg (i45) feature at (x+dx,
00250   y-dy), and so on, going counterclockwise:
00251 
00252   :    3   2   1
00253   :      \ | /
00254   :   4 -- o -- 0
00255   :      / | \
00256   :    5   6   7
00257 
00258   @param dx horizontal distance from current pixel at which the
00259   presence or absence of a given feature should be checked for.
00260   @param dy vertical distance from current pixel at which the
00261   presence or absence of a given feature should be checked for. */
00262 template <class T>
00263 Image<T> MSTFilterPartial(const Image<T>& i0, const Image<T>& i45,
00264                                const Image<T>& i90, const Image<T>& i135,
00265                                const bool r[8], const int dx = 6,
00266                                const int dy = 6,
00267                                const bool useEuclidDiag = false);
00268 
00269 
00270 
00271 //! Compute the magnitude of the gradient of an image
00272 /*! This is an approximation to the gradient magnitude as used in the
00273   SIFT code. output(x, y) = sqrt([input(x+1, y) - input(x-1, y)]^2 +
00274   [input(x, y+1) - input(x, y-1)]^2)*/
00275 template <class T>
00276 Image<typename promote_trait<T, float>::TP> gradientmag(const Image<T>& input);
00277 
00278 //! Compute the orientation of the gradient of an image
00279 /*! This is an approximation to the gradient orientation as used in
00280   the SIFT code. output(x, y) = atan2(input(x, y+1) - input(x, y-1),
00281   input(x+1, y) - input(x-1, y)). Result is in radians. A value of 0
00282   corresponds to a purely horizontal rightward gradient, other values
00283   relating to that in a clockwise manner. */
00284 template <class T>
00285 Image<typename promote_trait<T, float>::TP> gradientori(const Image<T>& input);
00286 
00287 //! Compute the magnitude and orientation of the gradient
00288 /*! This is just an efficient combination of gradientmag() and
00289   gradientori() */
00290 template <class T>
00291 void gradient(const Image<T>& input,
00292               Image<typename promote_trait<T, float>::TP>& mag,
00293               Image<typename promote_trait<T, float>::TP>& ori);
00294 
00295 //! Compute the magnitude and orientation of the gradient using the sobel op
00296 template <class T>
00297 void gradientSobel(const Image<T>& input,
00298               Image<typename promote_trait<T, float>::TP>& mag,
00299               Image<typename promote_trait<T, float>::TP>& ori,
00300                                   int kernelSize = 3);
00301 
00302 
00303 //! Compute the non maximal suppression (edge thinning)
00304 Image<float> nonMaxSuppr(const Image<float>& mag, const Image<float>& ori);
00305 
00306 
00307 //! shuffle the contents of an image using the Fisher-Yates shuffle
00308 //Randomly permute N elements by exchanging each element e_i with a
00309 //random element from i to N. It consumes Theta(N log N) bits and runs
00310 //in linear time. Obtained from
00311 //<http://www.nist.gov/dads/HTML/fisherYatesShuffle.html>
00312 
00313 template <class T_or_RGB>
00314 Image<T_or_RGB> shuffleImage(const Image<T_or_RGB> &img);
00315 
00316 
00317 // ######################################################################
00318 /* So things look consistent in everyone's emacs... */
00319 /* Local Variables: */
00320 /* indent-tabs-mode: nil */
00321 /* End: */
00322 
00323 #endif // !IMAGE_FILTEROPS_H_DEFINED
Generated on Sun May 8 08:40:52 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3