MathOps.H

00001 /*!@file Image/MathOps.H Mathematical operations on Image
00002  */
00003 
00004 // //////////////////////////////////////////////////////////////////// //
00005 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00006 // University of Southern California (USC) and the iLab at USC.         //
00007 // See http://iLab.usc.edu for information about this project.          //
00008 // //////////////////////////////////////////////////////////////////// //
00009 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00010 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00011 // in Visual Environments, and Applications'' by Christof Koch and      //
00012 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00013 // pending; application number 09/912,225 filed July 23, 2001; see      //
00014 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00015 // //////////////////////////////////////////////////////////////////// //
00016 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00017 //                                                                      //
00018 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00019 // redistribute it and/or modify it under the terms of the GNU General  //
00020 // Public License as published by the Free Software Foundation; either  //
00021 // version 2 of the License, or (at your option) any later version.     //
00022 //                                                                      //
00023 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00024 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00025 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00026 // PURPOSE.  See the GNU General Public License for more details.       //
00027 //                                                                      //
00028 // You should have received a copy of the GNU General Public License    //
00029 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00030 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00031 // Boston, MA 02111-1307 USA.                                           //
00032 // //////////////////////////////////////////////////////////////////// //
00033 //
00034 // Primary maintainer for this file: Rob Peters <rjpeters@klab.caltech.edu>
00035 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Image/MathOps.H $
00036 // $Id: MathOps.H 14592 2011-03-11 23:19:12Z jshen $
00037 //
00038 
00039 #ifndef IMAGE_MATHOPS_H_DEFINED
00040 #define IMAGE_MATHOPS_H_DEFINED
00041 
00042 #include "Image/Image.H"
00043 #include "Util/Promotions.H"
00044 #include "Image/Pixels.H" // can go away once the power-noise subroutine is templatized
00045 #include <vector>
00046 
00047 class Dims;
00048 template <class T> class Point2D;
00049 class Rectangle;
00050 template <class T> class Image;
00051 template <class T> class Range;
00052 
00053 //! fill with a generating function: x = func() for each x in image
00054 /*! returns a copy of the functor object after having been called */
00055 template <class T, class F>
00056 inline F fill(Image<T>& x, F func);
00057 
00058 //! apply a function in-place: x = func(x) for each x in image
00059 /*! returns a copy of the functor object after having been called */
00060 template <class T, class F>
00061 inline F apply(Image<T>& x, F func);
00062 
00063 //! apply a function out-of-place: y = func(x) for x in input, y in output
00064 template <class T2, class T, class F>
00065 inline Image<T2> transform(const Image<T>& x, F func);
00066 
00067 //! Returns the sum of all input pixels.
00068 template <class T>
00069 double sum(const Image<T>& a);
00070 
00071 //! Get the average of all input pixels.
00072 template <class T>
00073 double mean(const Image<T>& a);
00074 
00075 //! compute standard deviation of pixel values over image
00076 template <class T>
00077 double stdev(const Image<T>& a);
00078 
00079 //! Get the min/max range of the input pixels.
00080 /*! This provides similar functionality to Image<T>::getMinMax(). */
00081 template <class T>
00082 Range<T> rangeOf(const Image<T>& img);
00083 
00084 //! Linearly remap pixels from the source to dest dynamic ranges.
00085 /*! This is similar to, but more general than, Image<T>::normalize(). In
00086     effect, Image<T>::normalize() always assumes that the source range is
00087     the image's own min/max range. In contrast, remapRange() allows one to
00088     find the global min/max across a range of images, and then use that as
00089     the source range while normalizing each of the images. */
00090 template <class T>
00091 Image<T> remapRange(const Image<T>& img,
00092                     const Range<T>& from, const Range<T>& to);
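
// Usage sketch (editorial addition, not from the original header): remap an
// image onto [0, 255] using its own range as the source. The Range<float>(min, max)
// constructor is assumed here, since Range is only forward-declared in this file.
/*
    const Range<float> from = rangeOf(img);       // source = this image's min/max
    const Range<float> to(0.0f, 255.0f);          // assumed (min, max) constructor
    const Image<float> out = remapRange(img, from, to);
    // For several images, merge their rangeOf() results into one 'from' range
    // and remap each image with it.
*/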
00093 
00094 //! Square each element of the input image.
00095 template <class T>
00096 Image<T> squared(const Image<T>& a);
00097 
00098 //! Raise each element of the input image to the given power.
00099 template <class T>
00100 Image<typename promote_trait<T,float>::TP>
00101 toPower(const Image<T>& a, double pow);
00102 
00103 //! Raise pixels in a given region of an input image to the given power. Pass a vector of 2D points that defines the region of the image you want to enhance.
00104 template <class T>
00105 Image<typename promote_trait<T,float>::TP>
00106 toPowerRegion(const Image<T>& a, double pow, std::vector<Point2D<int> > region);
00107 
00108 //! Take absolute value of every pixel.
00109 template <class T>
00110 Image<T> abs(const Image<T>& a);
00111 
00112 //! Compute exp(pixel / (2 * sigma^2)) for every pixel.
00113 template <class T>
00114 Image<typename promote_trait<T,float>::TP>
00115 hmaxActivation(const Image<T>& a, const float sigma);
00116 /*
00117 //! Returns abs(b - c).
00118 //template <class T>
00119 //Image<T> absDiff(const Image<T>& b, const Image<T>& c);
00120 */
00121 //! Returns abs(b - c) for each color channel
00122 template <class T_or_RGB>
00123 Image<T_or_RGB> absDiff(const Image<T_or_RGB>& b,
00124                         const Image<T_or_RGB>& c);
00125 
00126 //! Returns (b - c), clamping to zero all negative values.
00127 //template <class T>
00128 //Image<T> clampedDiff(const Image<T>& b, const Image<T>& c);
00129 
00130 //! Returns (b - c) for each color channel, clamping to zero all neg. values
00131 template <class T_or_RGB>
00132 Image<T_or_RGB> clampedDiff(const Image<T_or_RGB>& b,
00133                             const Image<T_or_RGB>& c);
00134 
00135 //! Returns (val - a).
00136 template <class T>
00137 Image<T> binaryReverse(const Image<T>& a, const T val);
00138 
00139 //! Returns (b + c) / 2, computed cleanly so as to prevent overflows.
00140 template <class T>
00141 Image<T> average(const Image<T>& b, const Image<T>& c);
00142 
00143 //! Returns (a*w + b) / 2, the weighted average between images
00144 template <class T>
00145 Image<T> averageWeighted(Image<T>& a, const Image<T>& b, T *aWeight);
00146 
00147 //! Returns max(a, b), element by element.
00148 template <class T_or_RGB>
00149 Image<T_or_RGB> takeMax(const Image<T_or_RGB>& a,
00150                         const Image<T_or_RGB>& b);
00151 
00152 //! Take the element-wise max of a1 and a2 and use that choice to select between b1 and b2, storing the winning a values into aout and the corresponding b values into bout
00153 template <class T> 
00154 void takeLinkedMax(const Image<T>& a1, const Image<T>& a2, const Image<T>& b1, const Image<T>& b2, Image<T>& aout, Image<T>& bout);
00155 
00156 //! Returns min(a, b), element by element.
00157 template <class T>
00158 Image<T> takeMin(const Image<T>& a, const Image<T>& b);
00159 
00160 //! Compute energy for quadrature pair (sqrt(img1^2 + img2^2)).
00161 template <class T_or_RGB>
00162 Image<T_or_RGB> quadEnergy(const Image<T_or_RGB>& img1,
00163                            const Image<T_or_RGB>& img2);
00164 
00165 //! RMS error between arr1 and arr2.
00166 template <class T>
00167 double RMSerr(const Image<T>& arr1, const Image<T>& arr2);
00168 
00169 //! Create a transparency overlay of two images
00170 /*! by laying top over bottom, with the transparency of top set to
00171  trans. A new image will be returned (use 0 to 100 for percent
00172  transparency). NOTE: top and bottom must be the same size!
00173  This function only works for greyscale images. */
00174 template <class T_or_RGB>
00175 Image<T_or_RGB> overlay(const Image<T_or_RGB>& top, const Image<T_or_RGB>& bottom,
00176                  const float trans);
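
// Usage sketch (editorial addition): lay 'top' over 'bottom' at 50% transparency;
// both images are assumed to exist already and must have the same dims.
/*
    const Image<byte> blended = overlay(top, bottom, 50.0f);  // trans in [0, 100]
*/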
00177 
00178 //! sum pixels along the X and Y axes (sumX, sumY) and in total (return value)
00179 /*!@param img is a w x h size image
00180   @param sumX will return a vector of length w with the sum over the rows of img
00181   @param sumY will return a vector of length h with the sum over the columns*/
00182 template <class T>
00183 float sumXY(const Image<T>& img, std::vector<float>& sumX,
00184             std::vector<float>& sumY);
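
// Usage sketch (editorial addition): project an image onto its two axes.
/*
    std::vector<float> colSums, rowSums;
    const float total = sumXY(img, colSums, rowSums);
    // colSums has one entry per column (length w), rowSums one entry per row
    // (length h), and 'total' is the sum over all pixels.
*/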
00185 
00186 //! Returns Euclidean distance between two images
00187 /*! Returns sqrt(sum_over_pixels((img1[i] - img2[i])^2)) */
00188 template <class T>
00189 double distance(const Image<T> &img1, const Image<T> &img2);
00190 
00191 //! Returns weighted Euclidean distance between two images
00192 /*! Returns sqrt(sum_over_pixels(weight[i] * (img1[i] - img2[i])^2)) */
00193 template <class T>
00194 double distance(const Image<T> &img1, const Image<T> &img2,
00195                 const Image<float> &weight);
00196 
00197 //! Returns the correlation coefficient r^2 of the two images.
00198 double corrcoef(const Image<float>& img1, const Image<float>& img2);
00199 
00200 //! Returns the cross-correlation coefficient r between two patches
00201 /*! Note that the computations done here differ from what is done in
00202   corrcoef(), in that a signed correlation coefficient is returned
00203   (between -1 and 1), and we add a convention that if both patches
00204   have zero variance then the correlation is 1. If only one patch has
00205   zero variance then the correlation is zero. Current implementation
00206   requires that the patch fits within both image bounds, and assumes a
00207   small patch size so that it will not attempt to use a faster
00208   FFT-based implementation which can save time with larger patches.
00209   @param img1 first image
00210   @param topleft1 coordinates of top-left corner of patch in img1
00211   @param patchdims dimensions of patch
00212   @param img2 second image
00213   @param topleft2 coordinates of top-left corner of patch in img2
00214   @param eps epsilon used to determine whether patch variance is zero */
00215 template <class T>
00216 double corrpatch(const Image<T>& img1, const Point2D<int>& topleft1,
00217                  const Dims& patchdims, const Image<T>& img2,
00218                  const Point2D<int>& topleft2, const double eps = 1.0e-20);
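
// Usage sketch (editorial addition): correlate a 16x16 patch at (10,10) in img1
// with the patch at (12,8) in img2. The Dims(w, h) and Point2D<int>(x, y)
// constructors are assumed here; both patches must fit within their images.
/*
    const double r = corrpatch(img1, Point2D<int>(10, 10), Dims(16, 16),
                               img2, Point2D<int>(12, 8));
    // r lies in [-1, 1]; the default eps (1.0e-20) decides when a patch
    // counts as having zero variance.
*/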
00219 
00220 //! return correlation matrix and means for input samples
00221 /*! This function takes a set of images and produces the eigen / correlation
00222     matrices. The input provided consists of a set of images:
00223 
00224     (1) An image for each feature dimension
00225     (2) Multiple sets of (1) for each frame sample
00226 
00227     That is, you provide a cardinal set of images that contains multiple
00228     image frames as well as feature maps for each frame.
00229 
00230     This function returns an image that is the eigen matrix given the samples
00231     you have provided. It also returns the mean values per dimension. The
00232     usefulness of this is that one can compute and fit a Gaussian to
00233     the data. This can be used, for instance, in likelihood estimation.
00234 
00235     The returned eigen matrix is either a basic covariance matrix with
00236     un-normalized values, useful for likelihood estimation, OR you can
00237     opt to have it normalized by the product of the standard
00238     deviations. This gives a Pearson r correlation coefficient.
00239 
00240     Note on input: the set is a nested vector of images. In C++ notation,
00241     it should be of the form
00242 
00243           baseImages[frames][features]
00244 
00245     Thus, the first nesting should index each frame while the second should
00246     index each feature image.
00247 
00248     We compute covariance using an augmented method from Hayes 5th Ed. 1991
00249 
00250     @param baseImages The set of all input images
00251     @param baseCorr a return image that is the eigen matrix
00252     @param baseMean a return image that is the set of means
00253     @param baseSTD a return image that is the set of independent standard deviations
00254     @param baseSS a return image that contains the sum of squares
00255     @param baseN a return uint that is the total sample size
00256     @param returnR Should we return the normalized R or the eigen matrix
00257 */
00258 
00259 template <class T>
00260 void corrEigenMatrix(const std::vector<std::vector<Image<T> > > &baseImages,
00261                      Image<T> &baseCorr, Image<T> &baseMean,
00262                      Image<T> &baseSTD,  Image<T> &baseSS,
00263                      uint &baseN, bool returnR = false);
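
// Usage sketch (editorial addition) showing the baseImages[frame][feature]
// nesting convention described above; the images are assumed to be filled in
// elsewhere.
/*
    std::vector<std::vector<Image<float> > > baseImages;  // [frame][feature]
    Image<float> corr, mean, stdev, ss;
    uint n = 0;
    corrEigenMatrix(baseImages, corr, mean, stdev, ss, n, true);  // true: Pearson r
*/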
00264 
00265 //! Given an eigen matrix and mean matrix, compute a likelihood image
00266 /*! In this function we take in an image and the statistics of an
00267     exemplar image set. We then use this to find a likelihood image,
00268     which expresses how much each pixel in the input image is like the
00269     exemplar, in terms of probability. High return values mean that a pixel
00270     has a high probability of belonging to the exemplar class.
00271 
00272     Basically, we compute P(x|C) for each pixel assuming baseImages as the
00273     samples and baseCorr as the eigen matrix that describes the distribution
00274     of the original sample. This computes P from the multi-variate Gaussian.
00275     The results can be taken and fed into getNormalizedBayesImage() with
00276     the results from a second class to give a true Bayes P for each pixel
00277     belonging to one class or the other.
00278 
00279     Note: the non-normalized likelihood image is useful for certain things
00280     but should not be used in deriving the direct Bayes probability.
00281 
00282     @param baseImages the image set you wish to find the likelihood over
00283     @param baseCorr The eigen matrix correlation from the training set
00284     @param baseMean The set of mean values from the training set
00285     @param returnLogLikelyhood Keep numbers sane and return the log rather than e^
00286     @param likelyhoodImage this is the returned likelihood image
00287     @param nonNormalizedLImage this is a non-normalized likelihood image
00288 
00289 */
00290 template <class T>
00291 void getLikelyhoodImage(const std::vector<Image<T> > &baseImages,
00292                         const Image<T> &baseCorr, const Image<T> &baseMean,
00293                         const bool returnLogLikelyhood,
00294                         Image<T> &likelyhoodImage,
00295                         Image<T> &nonNormalizedLImage);
00296 
00297 //! Take in two likelihood images and compute the Bayes P between them
00298 /*! The idea here is to compute the true Bayesian probability of
00299     membership in one image or another given the likelihood computed in
00300     getLikelyhoodImage. Here we compute P(C|x) for two classes.
00301 
00302     The idea here is to compute the likelihood that a pixel belongs to one
00303     class vs. another in an input image. You must input two likelihood images
00304     of the form P(x|C) (as computed in getLikelyhoodImage). Also, you may
00305     input the prior probability P(C) if P(C1) is not P(C2). Otherwise,
00306     it is assumed that P(C1) = P(C2).
00307 
00308     The returned image holds, for each pixel, the P of being in class 1 vs.
00309     class 2; to get the result the other way around, compute 1 - P.
00310 
00311     To compute this, we perform P(C|x) = g(a) where:
00312 
00313     a = ln((P(x|C1)*P(C1))/(P(x|C2)*P(C2)))
00314 
00315     g(a) = 1/(1 + exp(-a))
00316 
00317     This is known as logistic discrimination (Bishop 1995)
00318 
00319     @param classImage1 the likelihood image of class 1
00320     @param classImage2 the likelihood image of class 2
00321     @param usingLogLikelyhood set to true if using log rather than e^ likelihood
00322     @param classPrior1 the prior of class 1 (sample numbers)
00323     @param classPrior2 the prior of class 2 (sample numbers)
00324     @param bias This is a simple bias if desired, P(x)' = P(x)*bias
00325 */
00326 template <class T>
00327 Image<T> getNormalizedBayesImage(const Image<T> classImage1,
00328                                  const Image<T> classImage2,
00329                                  const bool usingLogLikelyhood,
00330                                  const T beta,
00331                                  const T classPrior1 = 1.0,
00332                                  const T classPrior2 = 1.0,
00333                                  const T bias        = 1.0);
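
// Usage sketch (editorial addition): two-class pipeline. like1 and like2 are
// assumed to be log-likelihood images produced by getLikelyhoodImage() with
// returnLogLikelyhood = true; 1.0f is only a placeholder for beta, which is
// not documented above.
/*
    const Image<float> pClass1 =
      getNormalizedBayesImage(like1, like2,
                              true,       // usingLogLikelyhood
                              1.0f,       // beta (placeholder value)
                              900.0f,     // classPrior1, e.g. sample count of class 1
                              1100.0f);   // classPrior2, e.g. sample count of class 2
    // pClass1 holds P(C1|x) per pixel; P(C2|x) is 1 - pClass1.
*/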
00334 
00335 //! Take in an eigen/correlation matrix and standard deviations, return r
00336 /*! You can use this function to obtain r from an already existing eigen matrix,
00337     for instance one computed by corrEigenMatrix. Thus, you can compute r post
00338     hoc if you opted not to when using corrEigenMatrix. This function simply
00339     normalizes the eigen matrix by the standard deviations
00340     to give a true Pearson r correlation matrix.
00341 
00342     @param eigenMatrix The input eigen correlation matrix
00343     @param STDMatrix The input independent standard deviations
00344 */
00345 template <class T>
00346 Image<T> getPearsonRMatrix(const Image<T> &eigenMatrix,
00347                            const Image<T> &STDMatrix);
00348 
00349 //! Augment the Bayes image with some beliefs
00350 /*! The belief images should be from 0 to 1, where 1 signifies
00351     complete belief in the reliability of the class results while 0
00352     signifies absolutely no reliability. This basically takes the product
00353     of the two as the final augmented belief.
00354 
00355     Using the median point, beliefs can be contracted toward a single value.
00356     For instance, if the input Bayes image ranges from 0 to 2 and the median
00357     is specified as 1, then values below 1 will tend toward 1 as beliefs
00358     decrease, and values above 1 will likewise tend toward 1 as beliefs decrease.
00359 
00360     @param bayesImage An image from getNormalizedBayesImage
00361     @param beliefImage1 the belief for class image 1
00362     @param beliefImage2 the belief for class image 2
00363     @param medianPoint the middle point that values are squashed toward
00364     @param beliefImage The augmented belief image
00365     @param beliefValue a record of the belief products
00366 */
00367 
00368 template <class T>
00369 void getAugmentedBeliefBayesImage(const Image<T> &bayesImage,
00370                                   const Image<T> &beliefImage1,
00371                                   const Image<T> &beliefImage2,
00372                                   const T medianPoint,
00373                                   Image<T> &beliefImage,
00374                                   Image<T> &beliefValue);
00375 
00376 
00377 
00378 //! Returns peak signal-to-noise ratio
00379 /*! pSNR is computed as 10*log_10(255^2 / sigma^2) where
00380   sigma^2 = (1/N)*sum((x1_i - x2_i)^2), and it is a measure of distortion
00381   between two images */
00382 template <class T>
00383 double pSNR(const Image<T> &img1, const Image<T> &img2);
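
// Worked example (editorial addition): with a mean squared error of
// sigma^2 = 65.025 between the two images,
//   pSNR = 10*log10(255^2 / 65.025) = 10*log10(65025 / 65.025)
//        = 10*log10(1000) = 30 dB.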
00384 
00385 //! Returns weighted peak signal-to-noise ratio
00386 /*! This is like the other pSNR() except that it uses the weighted
00387   version of distance() internally */
00388 template <class T>
00389 double pSNR(const Image<T> &img1, const Image<T> &img2,
00390             const Image<float>& weight);
00391 
00392 //! Take square root of all elements
00393 template <class T>
00394 Image<typename promote_trait<T,float>::TP> sqrt(const Image<T>& img);
00395 
00396 //! Take the inverse of all elements whose absolute value is > eps, 0 otherwise
00397 template <class T>
00398 Image<typename promote_trait<T,float>::TP> inverse(const Image<T>& img,
00399                                                    const T eps);
00400 
00401 //! Take exponential of all pixels
00402 template <class T>
00403 Image<typename promote_trait<T,float>::TP> exp(const Image<T>& img);
00404 
00405 //! Take negative exponential of all pixels, that is exp(-x)
00406 template <class T>
00407 Image<typename promote_trait<T,float>::TP> negexp(const Image<T>& img);
00408 
00409 //! Take natural logarithm of all pixels
00410 template <class T>
00411 Image<typename promote_trait<T,float>::TP> log(const Image<T>& img);
00412 
00413 //! Take base-10 logarithm of all pixels
00414 template <class T>
00415 Image<typename promote_trait<T,float>::TP> log10(const Image<T>& img);
00416 
00417 //! determine the first and last non-zero values and the centroid
00418 /*!@param vect a vector of numbers
00419   @param centroid returns the centroid of vect
00420   @param first returns the position of the first non-zero value in vect
00421   @param last returns the position of the last non-zero value in vect
00422   @return true if we actually found all these values, false if vect
00423   only contains zeros*/
00424 template <class T>
00425 bool getCentroidFirstLast(std::vector<T> vect, float& centroid,
00426                           int& first, int& last);
00427 
00428 //! determine the centroid of all the points in the image
00429 /*! The centroid is rounded to integer pixel positions.
00430   When there are no values > 0 in the img, (-1,-1) is returned
00431   @param boundingBox the bounding box of the object(s) contained in
00432   the image is returned
00433   @param cenX returns the float value of the centroid along the x axis
00434   @param cenY returns the float value of the centroid along the y axis
00435   @return the centroid rounded to integer coordinates*/
00436 template <class T>
00437 Point2D<int> centroid(const Image<T>& img, Rectangle& boundingBox,
00438                  float& cenX, float& cenY);
00439 
00440 //! determine the center of mass of all the points in the image
00441 /*! The centroid is rounded to integer pixel positions.
00442   When there are no values > 0 in the img, (-1,-1) is returned */
00443 template <class T>
00444 Point2D<int> centroid(const Image<T>& img);
00445 
00446 //! Apply a squashing function to the pixel values
00447 /*! The squashing function is a 4th-order polynomial with sigmoidal
00448   shape. Denoting by x a pixel value in the original image and by
00449   y=f(x) the transformed pixel value that will be put in the result
00450   image, the constraints on the polynomial are: 1) f(oldmin)=newmin, 2)
00451   f(oldmax)=newmax, 3) f(oldmid)=newmid, 4) f'(oldmin)=0, and 5)
00452   f'(oldmax)=0. So, typically, the polynomial will have horizontal
00453   slope at both ends of the input range, and will remap the old
00454   min/max to the new min/max while also remapping a user-chosen
00455   mid-point oldmid to newmid. Playing with this mid-point allows the
00456   user to give more emphasis to higher or lower values while
00457   remapping. This is the full form, but two simplified forms are also
00458   available that will compute some of the arguments here from the
00459   input image. Because the main use of this function is when oldmin is
00460   the true minimum value of the input image, and oldmax its true
00461   maximum, here we will not waste time checking for input values
00462   outside this range, and their remapped values are unclear as the
00463   polynomial is unconstrained outside the input range. Internal
00464   remapping is done using floats, and results are converted back (with
00465   possible clamping) to the same type as the original image, so that
00466   this function may be efficiently used to do contrast adjustments in
00467   Image<byte> data. */
00468 template<class T>
00469 Image<T> squash(const Image<T>& src,
00470                 const T oldmin, const T newmin,
00471                 const T oldmid, const T newmid,
00472                 const T oldmax, const T newmax);
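
// Usage sketch (editorial addition): keep the full [0, 255] range of a greyscale
// image but darken its midtones, remapping the old mid-point 128 to 64 while the
// endpoints stay fixed.
/*
    const Image<byte> adjusted = squash(img,
                                        (byte)0,   (byte)0,     // oldmin -> newmin
                                        (byte)128, (byte)64,    // oldmid -> newmid
                                        (byte)255, (byte)255);  // oldmax -> newmax
*/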
00473 
00474 //! Apply a squashing function to the pixel values
00475 /*! In this specialization of the general form of squash(), the old
00476   min/max are computed using getMinMax(). */
00477 template<class T>
00478 Image<T> squash(const Image<T>& src, const T newmin,
00479                 const T oldmid, const T newmid, const T newmax);
00480 
00481 //! Apply a squashing function to the pixel values
00482 /*! In this specialization of the general form of squash(), the old
00483   min/max are computed using getMinMax(), and the new min/max will be
00484   kept equal to the old ones, so that there is no overall change in
00485   range (unless your mid-point is outside that range). */
00486 template<class T>
00487 Image<T> squash(const Image<T>& src, const T oldmid, const T newmid);
00488 
00489 //! Mix between two images based on comparing mask values to a threshold
00490 /*! On every pixel, if mask >= thresh, the returned value is taken
00491   from higher, otherwise it is taken from lower. All three of lower,
00492   higher and mask must have the same dims. */
00493 template <class T, class T_or_RGB>
00494 Image<T_or_RGB> thresholdedMix(const Image<T>& mask, const T& thresh,
00495                                const Image<T_or_RGB>& lower,
00496                                const Image<T_or_RGB>& higher);
00497 
00498 //! take the logistic sigmoid 1/(1+e^-(...)) over the image
00499 /*! This is a standard logistic sigmoid with offset o and slope b
00500   @param ima the input image
00501   @param o Offset for this sigmoid
00502   @param b Slope for this sigmoid
00503  */
00504 Image<float> logSig(const Image<float>& ima, float o, float b);
00505 
00506 //! randomly scramble image pixels
00507 template <class T_or_RGB>
00508 Image<T_or_RGB> scramble(const Image<T_or_RGB>& ima);
00509 
00510 //! take in an image and return its statistically most relevant points
00511 /*!
00512   Input an image to find a Monte Carlo like map. This will in essence cut
00513   out about half the pixels in a simple, quick fashion by applying the formula:
00514   select a pixel if pow(pixelValue,bias) > minVal + maxVal*rand()/(RAND_MAX+1.0)
00515   The bias is used to skew the distribution using an exponent;
00516   if no bias is given, this method will work more efficiently by skipping
00517   the exponent computation over each pixel.
00518   @param ima This is the input probability map
00519   @param coords this is a pointer to a vector which will hold selected coords
00520   @param bias this skews the probability map by pow(pixVal,bias)
00521 
00522 */
00523 int32 findMonteMap(Image<float>& ima,
00524                    std::vector<Point2D<int> >* coords,
00525                    int decimation, float bias);
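
// Usage sketch (editorial addition): 'salmap' is assumed to be a saliency or
// probability map computed elsewhere; the decimation value of 1 is only a
// placeholder.
/*
    std::vector<Point2D<int> > coords;
    const int32 kept = findMonteMap(salmap, &coords, 1, 1.0f);
    // with bias = 1.0 the map is used as-is; a larger bias skews the
    // selection toward high-valued pixels.
*/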
00526 
00527 //! Take in a vector and decimate it according to how many points you want back
00528 /*!
00529   In essence this will decimate a list of coordinates, attempting to create a
00530   list of the size given in outPoints. Thus, if you have a list of 23890 points
00531   and want a list of 300 points, this will take every nth point and create a
00532   list of about that size.
00533   @param coords This is the list of input coordinates
00534   @param cmap This is the final list of coordinates after decimation
00535   @param inPoints this is the input list size
00536   @param outPoints this is how many points you would like out
00537 */
00538 
00539 int32 makeSparceMap(std::vector<Point2D<int> >* coords, std::vector<Point2D<int>*>* cmap,
00540                     std::vector<Point2D<int> >* Oldcoords,
00541                     std::vector<bool>* keep, int inPoints, int outPoints);
00542 
00543 
00544 //! a += b * coeff
00545 template <class T>
00546 void inplaceAddWeighted(Image<T>& a,
00547                         const Image<T>& b, const float coeff);
00548 
00549 //! a = a*a
00550 template <class T>
00551 void inplaceSquare(Image<T>& a);
00552 
00553 //! Replace all occurrences of a value 'from' by another value 'to'
00554 void inplaceReplaceVal(Image<byte>& dest,
00555                        const byte& from, const byte& to);
00556 
00557 //! Progressive attenuation of borders by "size" pixels
00558 template <class T_or_RGB>
00559 void inplaceAttenuateBorders(Image<T_or_RGB>& a, int size);
00560 
00561 //! Set borders of "size" pixels to given value
00562 /*! Default value is T's default, which should be T's representation
00563     of zero. */
00564 template <class T_or_RGB>
00565 void inplaceSetBorders(Image<T_or_RGB>& a, const int size,
00566                        const T_or_RGB value = T_or_RGB());
00567 
00568 //! Add speckle noise to array; thresh = 1 for 100% noise
00569 void inplaceSpeckleNoise(Image<byte>& dest,
00570                          const float thresh, const int size,
00571                          const byte noise_color,
00572                          bool random_color=false);
00573 
00574 //! Add power-law noise to array
00575 //! Beta is a power-law factor
00576 //! T must be either a float or a double
00577 template <class T>
00578 Image<typename promote_trait<T,float>::TP> 
00579 addPowerNoise(const Image<T>& src, double beta);
00580 
00581 //! Get a sample which is the max value within a circular aperture
00582 /*! This only works for monochromatic images.
00583   @param center coordinates of the center of the aperture
00584   @param radius radius of the aperture, in pixels */
00585 float getLocalMax(const Image<float>& src,
00586                   const Point2D<int>& center, const int radius);
00587 
00588 //! Get a sample which is the average value within a circular aperture
00589 /*! This only works for monochromatic images.
00590   @param center coordinates of the center of the aperture
00591   @param radius radius of the aperture, in pixels */
00592 float getLocalAvg(const Image<float>& src,
00593                   const Point2D<int>& center, const int radius);
00594 
00595 //! Get min and max values
00596 template <class T>
00597 void getMinMax(const Image<T>& src, T& mini, T& maxi);
00598 
00599 //! Get min and max values inside and outside a mask
00600 /*! Only pixels with non-zero mask value are considered. */
00601 template <class T>
00602 void getMaskedMinMax(const Image<T>& src, const Image<byte>& mask,
00603                      T& min_in, T& max_in, T& min_out, T& max_out);
00604 
00605 //! Get min, max and average values
00606 template <class T>
00607 void getMinMaxAvg(const Image<T>& src, T& mini, T& maxi, T& avg);
00608 
00609 //! Get min, max, avg within a binary mask
00610 /*! Only the pixels where mask is non-zero are considered. */
00611 template <class T>
00612 void getMaskedMinMaxAvg(const Image<T>& src, const Image<byte> &mask,
00613                         T& mi, T& ma, T& avg);
00614 
00615 //! Get min, max, sum and area values from a continuously-masked image
00616 /*! The sum is the weighted sum of src by mask. Area is the sum of all
00617   non-zero mask values. */
00618 template <class T>
00619 void getMaskedMinMaxSumArea(const Image<T>& src, const Image<float> &mask,
00620                             T& mi, T& ma,
00621                             typename promote_trait<T,float>::TP &sum,
00622                             typename promote_trait<T,float>::TP &area);
00623 
00624 //! Get min, max, average, standard deviation and some other stats
00625 template <class T>
00626 void getMinMaxAvgEtc(const Image<T>& src, T& xmini, T& xmaxi, T& xavg, T& xstd,
00627                      ushort& minPosX, ushort& minPosY,
00628                      ushort& maxPosX, ushort& maxPosY,
00629                      uint& pixCount);
00630 
00631 //! Check whether all pixels have finite values
00632 /*! This relies on isFinite() being defined for your pixel type
00633   T. In Types.H we define it for the canonical types, and you can
00634   use these canonical definitions to define it for more complex
00635   types (see, for example, the definition in Pixels.H). */
00636 template <class T>
00637 bool isFinite(const Image<T>& src);
00638 
00639 //! find point of max activity and also what that max activity is
00640 template <class T>
00641 void findMax(const Image<T>& src, Point2D<int>& p, T& maxval);
00642 
00643 //! find point of min activity and also what that min activity is
00644 template <class T>
00645 void findMin(const Image<T>& src, Point2D<int>& p, T& minval);
00646 
00647 //! Saturate values < cmin to cmin and > cmax to cmax
00648 template <class T>
00649 void inplaceClamp(Image<T>& dst, const T cmin, const T cmax);
00650 
00651 //! Normalize values between nmin and nmax
00652 template <class T>
00653 void inplaceNormalize(Image<T>& dst, const T nmin, const T nmax);
00654 
00655 //! Normalize values between nmin and nmax, also return oldmin and oldmax
00656 template <class T>
00657 void inplaceNormalize(Image<T>& dst, const T nmin, const T nmax,
00658                       T& oldmin, T& oldmax);
00659 
00660 //! Return true if point p is a local maximum
00661 /*! We just check that the value at p is >= the values of its 4 neighbors */
00662 template <class T>
00663 bool isLocalMax(const Image<T>& src, const Point2D<int>& p);
00664 
00665 //! Saturate values < 0
00666 template <class T>
00667 void inplaceRectify(Image<T>& dst);
00668 
00669 //! Put all values >= 0 into pos, and the negated values of all vals <= 0 into neg
00670 template <class T>
00671 void splitPosNeg(const Image<T>& src,
00672                  Image<T>& pos, Image<T>& neg);
00673 
00674 //! Cut values < thresh and replace them by val
00675 template <class T>
00676 void inplaceLowThresh(Image<T>& dst,
00677                       const T thresh, const T val = T());
00678 
00679 //! Cut values whose abs is < thresh and replace them by val
00680 template <class T>
00681 void inplaceLowThreshAbs(Image<T>& dst,
00682                          const T thresh, const T val = T());
00683 
00684 //! Pass image through sigmoid: f(x) = x^g / (s + x^h)
00685 template <class T>
00686 void inplaceSigmoid(Image<T>& dst,
00687                     const float g, const float h, const float s);
00688 
00689 //! Tells how many pixels are zero
00690 template <class T>
00691 int emptyArea(const Image<T>& src);
00692 
00693 //! Counts how many pixels are > thresh (in absolute value if absol true)
00694 template <class T>
00695 int countThresh(const Image<T>& src,
00696                 const T thresh, const bool absol = true);
00697 
00698 //! Return a row vector containing the within-column mean of each input column
00699 Image<float> meanRow(const Image<float>& inp);
00700 
00701 //! Return a row vector containing the within-column standard deviation of each input column
00702 Image<float> stdevRow(const Image<float>& inp);
00703 
00704 //! Return the result of adding vector v to each row of matrix M
00705 Image<float> addRow(const Image<float>& M, const Image<float>& v);
00706 
00707 //! Return the result of subtracting vector v from each row of matrix M
00708 Image<float> subtractRow(const Image<float>& M, const Image<float>& v);
00709 
00710 //! Return the result of multiplying each row of matrix M by vector v
00711 Image<float> multiplyRow(const Image<float>& M, const Image<float>& v);
00712 
00713 //! Return the result of dividing each row of matrix M by vector v
00714 Image<float> divideRow(const Image<float>& M, const Image<float>& v);
00715 
00716 // ######################################################################
00717 // ######################################################################
00718 // ##### Inline function definitions
00719 // ######################################################################
00720 // ######################################################################
00721 
00722 // ######################################################################
00723 template <class T, class F>
00724 inline F fill(Image<T>& x, F func)
00725 {
00726   // optimization; see comment in Image<T>::clear()
00727   if (x.isShared())
00728     x = Image<T>(x.getDims(), NO_INIT);
00729 
00730   for (typename Image<T>::iterator itr = x.beginw(), stop = x.endw();
00731        itr != stop; ++itr)
00732     *itr = T(func());
00733 
00734   return func;
00735 }
00736 
00737 // ######################################################################
00738 template <class T, class F>
00739 inline F apply(Image<T>& x, F func)
00740 {
00741   for (typename Image<T>::iterator itr = x.beginw(), stop = x.endw();
00742        itr != stop; ++itr)
00743     *itr = T(func(*itr));
00744 
00745   return func;
00746 }
00747 
00748 // ######################################################################
00749 template <class T2, class T, class F> inline
00750 Image<T2> transform(const Image<T>& x, F func)
00751 {
00752   Image<T2> result(x.getDims(), NO_INIT);
00753 
00754   typename Image<T>::const_iterator sptr = x.begin();
00755   typename Image<T2>::iterator dptr = result.beginw();
00756   typename Image<T2>::iterator stop = result.endw();
00757 
00758   while (dptr != stop)
00759     *dptr++ = func(*sptr++);
00760 
00761   return result;
00762 }
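
// Usage sketch (editorial addition): convert a byte image into a float image in
// [0, 1] with transform(); the destination type is given explicitly as the first
// template argument, and 'byteImg' is assumed to exist already.
/*
    struct ByteToUnitFloat
    {
      float operator()(byte v) const { return float(v) / 255.0f; }
    };

    const Image<float> unitImg = transform<float>(byteImg, ByteToUnitFloat());
*/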
00763 
00764 // ######################################################################
00765 //! Approximate a polyline with a simpler polyline using the Douglas-Peucker algorithm
00766 // This code was derived from softSurfer (www.softsurfer.com)
00767 std::vector<Point2D<int> >  approxPolyDP(std::vector<Point2D<int> >& points, float tol);
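
// Usage sketch (editorial addition): simplify a pixel contour, allowing at most
// about 2 pixels of deviation from the original polyline; 'contour' is assumed
// to be filled in elsewhere.
/*
    std::vector<Point2D<int> > contour;
    const std::vector<Point2D<int> > simplified = approxPolyDP(contour, 2.0f);
*/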
00768 
00769 // The recursion used by approxPolyDP (Douglas-Peucker)
00770 void recursePolyDP(float tol, std::vector<Point2D<int> >& v, int j, int k, std::vector<int>& mk);
00771 
00772 //! Approximate 2D hull algorithm using the BFP algorithm
00773 // Copyright 2001, softSurfer (www.softsurfer.com)
00774 // This code may be freely used and modified for any purpose
00775 // providing that this copyright notice is included with it.
00776 // SoftSurfer makes no warranty for this code, and cannot be held
00777 // liable for any real or imagined damage resulting from its use.
00778 // Users of this code must verify correctness for their application.
00779 std::vector<Point2D<float> > approximateHull(std::vector<Point2D<float> > points, int accuracy);
00780 
00781 // ######################################################################
00782 /* So things look consistent in everyone's emacs... */
00783 /* Local Variables: */
00784 /* indent-tabs-mode: nil */
00785 /* End: */
00786 
00787 #endif // !IMAGE_MATHOPS_H_DEFINED