/*!@file Image/IntegerMathOps.C Fixed-point integer math versions of some of our floating-point Image functions */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Rob Peters <rjpeters at usc dot edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Image/IntegerMathOps.C $
// $Id: IntegerMathOps.C 10983 2009-03-05 07:19:14Z itti $
//

#ifndef IMAGE_INTEGERMATHOPS_C_DEFINED
#define IMAGE_INTEGERMATHOPS_C_DEFINED

#include "Image/IntegerMathOps.H"

#include "Image/CutPaste.H"    // for shiftClean()
#include "Image/ImageSetOps.H" // for operator/=()
#include "Image/MathOps.H"     // for inplaceRectify()
#include "Image/Pixels.H"
#include "Image/PyramidCache.H"
#include "Image/ShapeOps.H"    // for decXY(), decX(), decY()
#include "rutz/rand.h"

// ######################################################################
int intgScaleFromByte(const byte* src, const uint nbits)
{
  if (nbits > 8)
    return (int(*src) << (nbits - 8));
  else if (nbits < 8)
    return (int(*src) >> (8 - nbits));
  // else nbits == 8
  return int(*src);
}

// ######################################################################
int intgScaleFromFloat(const float* src, const uint nbits)
{
  if (nbits > 8)
    return (int(*src * (1 << (nbits - 8))));
  else if (nbits < 8)
    return (int(*src / (1 << (8 - nbits))));
  // else nbits == 8
  return int(*src);
}

// ######################################################################
Image<int> intgScaleFromByte(const Image<byte>* src, const uint nbits)
{
  if (nbits > 8)
    return (Image<int>(*src) * (1 << (nbits - 8)));
  else if (nbits < 8)
    return (Image<int>(*src) / (1 << (8 - nbits)));
  // else nbits == 8
  return Image<int>(*src);
}

// ######################################################################
Image<int> intgScaleFromFloat(const Image<float>* src, const uint nbits)
{
  if (nbits > 8)
    return (Image<int>(*src * (1 << (nbits - 8))));
  else if (nbits < 8)
    return (Image<int>(*src / (1 << (8 - nbits))));
  // else nbits == 8
  return Image<int>(*src);
}

// ######################################################################
Image<PixRGB<int> > intgScaleFromByte(const Image<PixRGB<byte> >* src, const uint nbits)
{
  Image<PixRGB<int> > result(src->getDims(), NO_INIT);
  Image<PixRGB<byte> >::const_iterator sptr = src->begin();
  Image<PixRGB<int> >::iterator dptr = result.beginw();
  Image<PixRGB<int> >::iterator const stop = result.endw();

  if (nbits > 8)
    {
      const int lshift = nbits - 8;
      while (dptr != stop)
        {
          *dptr++ = PixRGB<int>(int(sptr->p[0]) << lshift,
                                int(sptr->p[1]) << lshift,
                                int(sptr->p[2]) << lshift);
          ++sptr;
        }
    }
  else if (nbits < 8)
    {
      const int rshift = 8 - nbits;
      while (dptr != stop)
        {
          *dptr++ = PixRGB<int>(int(sptr->p[0]) >> rshift,
                                int(sptr->p[1]) >> rshift,
                                int(sptr->p[2]) >> rshift);
          ++sptr;
        }
    }
  else // nbits == 8
    {
      while (dptr != stop)
        {
          *dptr++ = PixRGB<int>(int(sptr->p[0]),
                                int(sptr->p[1]),
                                int(sptr->p[2]));
          ++sptr;
        }
    }

  return result;
}
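
// A note on the fixed-point convention used by the scaling functions
// above (and throughout this file): an 8-bit input value v is given
// nbits bits of working range by shifting, so with nbits == 16 the
// byte 255 becomes 255 << 8 == 65280, and with nbits == 4 it becomes
// 255 >> 4 == 15. Roughly, nbits plays the role that the [0..255]
// float range plays in the floating-point versions of these routines.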

// ######################################################################
Image<int> intgScaleLuminanceFromByte(const Image<PixRGB<byte> >* src, const uint nbits)
{
  Image<int> result(src->getDims(), NO_INIT);
  Image<PixRGB<byte> >::const_iterator sptr = src->begin();
  Image<int>::iterator dptr = result.beginw();
  Image<int>::iterator const stop = result.endw();

  if (nbits > 8)
    while (dptr != stop)
      { *dptr++ = ((sptr->p[0] + sptr->p[1] + sptr->p[2]) / 3) << (nbits - 8); ++sptr; }
  else if (nbits < 8)
    while (dptr != stop)
      { *dptr++ = ((sptr->p[0] + sptr->p[1] + sptr->p[2]) / 3) >> (8 - nbits); ++sptr; }
  else // nbits == 8
    while (dptr != stop)
      { *dptr++ = (sptr->p[0] + sptr->p[1] + sptr->p[2]) / 3; ++sptr; }

  return result;
}

// ######################################################################
Image<PixRGB<int> > intgScaleFromFloat(const Image<PixRGB<float> >* src, const uint nbits)
{
  if (nbits > 8)
    return (Image<PixRGB<int> >(*src * (1 << (nbits - 8))));
  else if (nbits < 8)
    return (Image<PixRGB<int> >(*src / (1 << (8 - nbits))));
  // else nbits == 8
  return Image<PixRGB<int> >(*src);
}

// ######################################################################
Image<float> intgScaleToFloat(const Image<int>* src, const uint nbits)
{
  if (nbits > 8)
    return (Image<float>(*src) / float(1 << (nbits - 8)));
  else if (nbits < 8)
    return (Image<float>(*src) * (1 << (8 - nbits)));
  // else nbits == 8
  return Image<float>(*src);
}

// ######################################################################
// Anderson's separable kernel: 1/16 * [1 4 6 4 1]
Image<int> intgLowPass5xDecX(const Image<int>& src,
                             const integer_math* imath)
{
  const int w = src.getWidth(), h = src.getHeight();

  if (w < 2) return src; // nothing to smooth

  const int w2 = w / 2;
  ASSERT(w2 > 0);

  Image<int> result(w2, h, NO_INIT);

  const int filterbits = 4; // log2(16)
  const int accumbits = 3;  // ceil(log2(5))

  if ((imath->nbits + filterbits + accumbits) >= (8*sizeof(int) - 1))
    (*imath->low_pass_5_x_dec_x_manybits)(src.getArrayPtr(), w, h,
                                          result.getArrayPtr(), w2);
  else
    (*imath->low_pass_5_x_dec_x_fewbits)(src.getArrayPtr(), w, h,
                                         result.getArrayPtr(), w2);

  return result;
}

// ######################################################################
// Anderson's separable kernel: 1/16 * [1 4 6 4 1]
Image<int> intgLowPass5yDecY(const Image<int>& src,
                             const integer_math* imath)
{
  const int w = src.getWidth(), h = src.getHeight();

  if (h < 2) return src; // nothing to smooth

  const int h2 = h / 2;
  ASSERT(h2 > 0);

  Image<int> result(w, h2, NO_INIT);

  const int filterbits = 4; // log2(16)
  const int accumbits = 3;  // ceil(log2(5))

  if ((imath->nbits + filterbits + accumbits) >= (8*sizeof(int) - 1))
    (*imath->low_pass_5_y_dec_y_manybits)(src.getArrayPtr(), w, h,
                                          result.getArrayPtr(), h2);
  else
    (*imath->low_pass_5_y_dec_y_fewbits)(src.getArrayPtr(), w, h,
                                         result.getArrayPtr(), h2);

  return result;
}
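
// A worked instance of the manybits/fewbits dispatch above: the 5-tap
// kernel costs filterbits == 4 headroom bits (its coefficients sum to
// 16 == 1<<4) plus accumbits == 3 (five partial products accumulate,
// ceil(log2(5))). With 32-bit int the threshold is 31, so for, say,
// imath->nbits == 30 we get 30 + 4 + 3 == 37 >= 31 and the "manybits"
// variant is chosen (which presumably rescales intermediates to avoid
// overflow), while imath->nbits == 16 gives 16 + 4 + 3 == 23 < 31 and
// the faster "fewbits" variant is safe.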

// ######################################################################
Image<int> intgXFilterClean(const Image<int>& src,
                            const int* hFilt, const int hfs,
                            const int shiftbits,
                            const integer_math* imath)
{
  if (hfs == 0)
    return src;

  int hFiltFlipped[hfs]; // flipped filter to accelerate convolution:
  int sumh = 0;
  for (int x = 0; x < hfs; x ++)
    { sumh += hFilt[x]; hFiltFlipped[hfs-1-x] = hFilt[x]; }

  ASSERT(sumh == (1 << shiftbits));

  const int accumbits = int(ceil(log2(double(hfs))));

  Image<int> result(src.getDims(), NO_INIT);

  if ((imath->nbits + shiftbits + accumbits) >= (8*sizeof(int) - 1))
    {
      if (src.getWidth() < hfs)
        (*imath->x_filter_clean_small_manybits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           hFiltFlipped, hfs, shiftbits,
           result.getArrayPtr());
      else
        (*imath->x_filter_clean_manybits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           hFiltFlipped, hfs, shiftbits,
           result.getArrayPtr());
    }
  else
    {
      if (src.getWidth() < hfs)
        (*imath->x_filter_clean_small_fewbits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           hFiltFlipped, hfs, shiftbits,
           result.getArrayPtr());
      else
        (*imath->x_filter_clean_fewbits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           hFiltFlipped, hfs, shiftbits,
           result.getArrayPtr());
    }

  return result;
}

// ######################################################################
Image<int> intgYFilterClean(const Image<int>& src,
                            const int* vFilt, const int vfs,
                            const int shiftbits,
                            const integer_math* imath)
{
  if (vfs == 0)
    return src;

  int vFiltFlipped[vfs]; // flipped filter to accelerate convolution
  int sumv = 0;
  for (int x = 0; x < vfs; x ++)
    { sumv += vFilt[x]; vFiltFlipped[vfs-1-x] = vFilt[x]; }

  ASSERT(sumv == (1 << shiftbits));

  const int accumbits = int(ceil(log2(double(vfs))));

  Image<int> result(src.getDims(), NO_INIT);

  if ((imath->nbits + shiftbits + accumbits) >= (8*sizeof(int) - 1))
    {
      if (src.getHeight() < vfs)
        (*imath->y_filter_clean_small_manybits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           vFiltFlipped, vfs, shiftbits,
           result.getArrayPtr());
      else
        (*imath->y_filter_clean_manybits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           vFiltFlipped, vfs, shiftbits,
           result.getArrayPtr());
    }
  else
    {
      if (src.getHeight() < vfs)
        (*imath->y_filter_clean_small_fewbits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           vFiltFlipped, vfs, shiftbits,
           result.getArrayPtr());
      else
        (*imath->y_filter_clean_fewbits)
          (src.getArrayPtr(), src.getWidth(), src.getHeight(),
           vFiltFlipped, vfs, shiftbits,
           result.getArrayPtr());
    }

  return result;
}
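
// The 9-tap kernel used below, [1 8 28 56 70 56 28 8 1], is row 8 of
// Pascal's triangle (a binomial approximation to a Gaussian); its
// coefficients sum to 2^8 == 256, which is what satisfies the
// ASSERT(sum == (1 << shiftbits)) invariant in the *FilterClean()
// routines above when shiftbits == 8.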

// ######################################################################
Image<int> intgLowPass9x(const Image<int>& src,
                         const integer_math* imath)
{
  const int w = src.getWidth(), h = src.getHeight();

  if (w < 2) return src; // nothing to smooth

  if (w < 9) // use inefficient implementation for small images
    {
      const int kernel[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
      const int shiftbits = 8; // 256 = (1<<8)
      return intgXFilterClean(src, kernel, 9, shiftbits, imath);
    }

  Image<int> result(w, h, NO_INIT);

  const int filterbits = 8; // log2(256)
  const int accumbits = 4;  // ceil(log2(9))

  if ((imath->nbits + filterbits + accumbits) >= (8*sizeof(int) - 1))
    (*imath->low_pass_9_x_manybits)(src.getArrayPtr(), w, h,
                                    result.getArrayPtr());
  else
    (*imath->low_pass_9_x_fewbits)(src.getArrayPtr(), w, h,
                                   result.getArrayPtr());

  return result;
}

// ######################################################################
Image<int> intgLowPass9y(const Image<int>& src,
                         const integer_math* imath)
{
  const int w = src.getWidth(), h = src.getHeight();

  if (h < 2) return src; // nothing to smooth

  if (h < 9) // use inefficient implementation for small images
    {
      const int kernel[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
      const int shiftbits = 8; // 256 = (1<<8)
      return intgYFilterClean(src, kernel, 9, shiftbits, imath);
    }

  Image<int> result(w, h, NO_INIT);

  const int filterbits = 8; // log2(256)
  const int accumbits = 4;  // ceil(log2(9))

  if ((imath->nbits + filterbits + accumbits) >= (8*sizeof(int) - 1))
    (*imath->low_pass_9_y_manybits)(src.getArrayPtr(), w, h,
                                    result.getArrayPtr());
  else
    (*imath->low_pass_9_y_fewbits)(src.getArrayPtr(), w, h,
                                   result.getArrayPtr());

  return result;
}

// ######################################################################
Image<int> intgLowPassX(int filterSize, const Image<int>& src,
                        const integer_math* imath)
{
  switch (filterSize)
    {
    case 9: return intgLowPass9x(src, imath);
    default:
      LFATAL("Filter size %d is not supported", filterSize);
    }

  /* can't happen */ return Image<int>();
}

// ######################################################################
Image<int> intgLowPassY(int filterSize, const Image<int>& src,
                        const integer_math* imath)
{
  switch (filterSize)
    {
    case 9: return intgLowPass9y(src, imath);
    default:
      LFATAL("Filter size %d is not supported", filterSize);
    }

  /* can't happen */ return Image<int>();
}

// ######################################################################
Image<int> intgLowPass(int filterSize, const Image<int>& src,
                       const integer_math* imath)
{
  return intgLowPassY(filterSize,
                      intgLowPassX(filterSize, src, imath),
                      imath);
}
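
// Because the binomial kernel is separable, intgLowPass() above (x
// pass followed by y pass) matches convolution with the full 9x9
// outer-product kernel, at 2*9 rather than 81 multiplies per pixel --
// modulo the integer rounding that each 1-D pass introduces.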

// ######################################################################
Image<int> intgQuadEnergy(const Image<int>& img1, const Image<int>& img2)
{
  ASSERT(img1.isSameSize(img2));

  Image<int> result(img1.getDims(), NO_INIT);

  Image<int>::const_iterator s1ptr = img1.begin();
  Image<int>::const_iterator s2ptr = img2.begin();
  Image<int>::iterator dptr = result.beginw();
  Image<int>::iterator stop = result.endw();

  while (dptr != stop)
    {
      const int s1 = abs(*s1ptr++);
      const int s2 = abs(*s2ptr++);

      /* "A Fast Approximation to the Hypotenuse" by Alan Paeth, from
         "Graphics Gems", Academic Press, 1990

         http://www.acm.org/pubs/tog/GraphicsGems/gems/HypotApprox.c

         gives approximate value of sqrt(s1*s1+s2*s2) with only
         overestimations, and then never by more than (9/8) + one bit
         uncertainty
      */
      *dptr++ = (s1 > s2) ? (s1 + (s2 >> 1)) : ((s1 >> 1) + s2);
    }

  return result;
}
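
// Example of Paeth's approximation as used above: for s1 == 4 and
// s2 == 3 it yields 4 + (3>>1) == 5, which happens to be exact; for
// s1 == s2 == 10 it yields (10>>1) + 10 == 15 versus the true
// sqrt(200) ~= 14.14, a modest overestimate consistent with the bound
// quoted in the comment.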

// ######################################################################
Image<int> intgOrientedFilter(const Image<int>& src,
                              const float k, const float theta,
                              const integer_math* imath)
{
  static IntgTrigTable<256, 8> trig;

  const int KBITS = 8;

  const int thetaidx = trig.indexDegrees(theta + 90.0);

  const double kx = double(k) * trig.costab[thetaidx] / double(1<<trig.nbits);
  const double ky = double(k) * trig.sintab[thetaidx] / double(1<<trig.nbits);

  const int kxnum = int((kx * (1 << KBITS) * trig.tabsiz) / (2.0*M_PI));
  const int kynum = int((ky * (1 << KBITS) * trig.tabsiz) / (2.0*M_PI));

  Image<int> re(src.getDims(), NO_INIT), im(src.getDims(), NO_INIT);
  Image<int>::const_iterator sptr = src.begin();
  Image<int>::iterator reptr = re.beginw(), imptr = im.beginw();

  // (x,y) = (0,0) at center of image:
  const int w2l = src.getWidth() / 2;
  const int w2r = src.getWidth() - w2l;
  const int h2l = src.getHeight() / 2;
  const int h2r = src.getHeight() - h2l;

  // let's do a conservative check to make sure that we won't overflow
  // when we compute "arg" later on -- as a very rough estimate, kxnum
  // and kynum are on the order of 2^16 (8 bits from KBITS=8, 8 bits
  // from trig.tabsiz=256), which gives room for w+h to be up to about
  // 2^15
  ASSERT((std::numeric_limits<int>::max() / (abs(kxnum) + abs(kynum)))
         > (w2r + h2r));

  ASSERT((2 * trig.nbits + 1) < 8*sizeof(int));

  const int mdcutoff =
    std::numeric_limits<int>::max() >> (trig.nbits+1);

  for (int j = -h2l; j < h2r; ++j)
    for (int i = -w2l; i < w2r; ++i)
      {
        const int arg = (i * kxnum + j * kynum) >> KBITS;

        int idx = arg % trig.tabsiz;
        if (idx < 0) idx += trig.tabsiz;

        const int sval = *sptr++;

        if (sval == 0)
          {
            *reptr++ = 0;
            *imptr++ = 0;
          }
        else if (abs(sval) < mdcutoff)
          {
            *reptr++ = (sval * trig.costab[idx]) >> (trig.nbits+1);
            *imptr++ = (sval * trig.sintab[idx]) >> (trig.nbits+1);
          }
        else
          {
            const int val = (sval >> (trig.nbits+1));
            *reptr++ = val * trig.costab[idx];
            *imptr++ = val * trig.sintab[idx];
          }
      }

  re = intgLowPass(9, re, imath);
  im = intgLowPass(9, im, imath);

  return intgQuadEnergy(re, im);
}

// ######################################################################
void intgInplaceAttenuateBorders(Image<int>& a, int size)
{
  ASSERT(a.initialized());

  Dims dims = a.getDims();

  if (size * 2 > dims.w()) size = dims.w() / 2;
  if (size * 2 > dims.h()) size = dims.h() / 2;
  if (size < 1) return; // forget it

  // top lines:
  int coeff = 1;
  Image<int>::iterator aptr = a.beginw();
  for (int y = 0; y < size; y ++)
    {
      for (int x = 0; x < dims.w(); x ++)
        {
          *aptr = ((*aptr) / (size+1)) * coeff;
          ++aptr;
        }
      ++coeff;
    }
  // normal lines: start again from beginning to attenuate corners twice:
  aptr = a.beginw();
  for (int y = 0; y < dims.h(); y ++)
    {
      coeff = 1;
      for (int x = 0; x < size; x ++)
        {
          *(aptr + dims.w() - 1 - x * 2) =
            (*(aptr + dims.w() - 1 - x * 2) / (size+1)) * coeff;

          *aptr = ((*aptr) / (size+1)) * coeff;
          ++aptr;
          ++coeff;
        }
      aptr += dims.w() - size;
    }
  // bottom lines
  aptr = a.beginw() + (dims.h() - size) * dims.w();
  coeff = size;
  for (int y = dims.h() - size; y < dims.h(); y ++)
    {
      for (int x = 0; x < dims.w(); ++x)
        {
          *aptr = ((*aptr) / (size+1)) * coeff;
          ++aptr;
        }
      --coeff;
    }
}

// ######################################################################
ImageSet<int> intgBuildPyrLaplacian(const Image<int>& image,
                                    int firstlevel, int depth,
                                    int filterSize,
                                    const integer_math* imath)
{
  ASSERT(image.initialized());

  ImageSet<int> result(depth);

  // compute laplacian as image - LPF(image), then compute oriented
  // filter:

  Image<int> lpfima;

  for (int lev = 0; lev < depth; ++lev)
    {
      const Image<int> dec =
        lev == 0 ? image : decXY(lpfima);
      lpfima = intgLowPass(filterSize, dec, imath);

      if (lev >= firstlevel)
        result[lev] = dec-lpfima;
    }

  return result;
}

// ######################################################################
ImageSet<int> intgBuildPyrOrientedFromLaplacian(const ImageSet<int>& lplc,
                                                const int filterSize,
                                                const float theta,
                                                const integer_math* imath)
{
  int attenuation_width = -1;
  float spatial_freq = -1.0;

  switch (filterSize)
    {
    case 9: attenuation_width = 5; spatial_freq = 2.6; break;
    default:
      LFATAL("Filter size %d is not supported", filterSize);
    }

  ImageSet<int> result(lplc.size());

  for (uint lev = 0; lev < lplc.size(); ++lev)
    {
      // if the laplacian is empty at a given level, then just leave
      // the output empty at that level, too
      if (!lplc[lev].initialized())
        continue;

      result[lev] = intgOrientedFilter(lplc[lev], spatial_freq, theta,
                                       imath);
      // attenuate borders that are overestimated due to filter truncation:
      intgInplaceAttenuateBorders(result[lev], attenuation_width);
    }

  return result;
}

// ######################################################################
ImageSet<int> intgBuildPyrOriented(const Image<int>& image,
                                   int firstlevel, int depth,
                                   int filterSize, float theta,
                                   const integer_math* imath)
{
  const ImageSet<int> lplc =
    intgBuildPyrLaplacian(image, firstlevel, depth, filterSize, imath);

  return intgBuildPyrOrientedFromLaplacian(lplc, filterSize,
                                           theta, imath);
}

// ######################################################################
ImageSet<int> intgBuildPyrGaussian(const Image<int>& image,
                                   int depth, int filterSize,
                                   const integer_math* imath)
{
  ASSERT(image.initialized());

  ImageSet<int> result(depth);

  result[0] = image;
  for (int lev = 1; lev < depth; ++lev)
    {
      Image<int> a = result.getImage(lev-1);
      if (filterSize == 5)
        {
          a = intgLowPass5xDecX(a,imath);
          a = intgLowPass5yDecY(a,imath);
        }
      else
        {
          a = decX(intgLowPassX(filterSize, a, imath));
          a = decY(intgLowPassY(filterSize, a, imath));
        }
      result[lev] = a;
    }

  return result;
}
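
// A minimal usage sketch for the pyramid builders above (hypothetical
// caller; 'img' and 'imath' would come from the caller's setup):
//
//   ImageSet<int> gauss = intgBuildPyrGaussian(img, 8, 5, imath);
//   ImageSet<int> ori   = intgBuildPyrOriented(img, 0, 8, 9, 45.0F, imath);
//
// Each Gaussian level is the previous one low-passed and decimated by
// two in x and y, so level l holds roughly 1/4^l of the input pixels.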

// ######################################################################
Image<int> intgDownSize(const Image<int>& src, const Dims& dims,
                        const int filterWidth,
                        const integer_math* imath)
{
  const int new_w = dims.w();
  const int new_h = dims.h();

  if (src.getWidth() == new_w && src.getHeight() == new_h) return src;

  ASSERT(src.getWidth() / new_w > 1 && src.getHeight() / new_h > 1);

  const int wdepth = int(0.5+log(double(src.getWidth() / new_w)) / M_LN2);
  const int hdepth = int(0.5+log(double(src.getHeight() / new_h)) / M_LN2);

  if (wdepth != hdepth)
    LFATAL("arrays must have same proportions");

  Image<int> result = src;

  for (int i = 0; i < wdepth; ++i)
    {
      result = decX(intgLowPassX(filterWidth, result, imath));
      result = decY(intgLowPassY(filterWidth, result, imath));
    }

  return result;
}

// ######################################################################
Image<int> intgRescale(const Image<int>& src, const Dims& dims)
{
  const int new_w = dims.w();
  const int new_h = dims.h();

  ASSERT(src.initialized()); ASSERT(new_w > 0 && new_h > 0);

  const int orig_w = src.getWidth();
  const int orig_h = src.getHeight();

  // check if same size already
  if (new_w == orig_w && new_h == orig_h) return src;

  Image<int> result(new_w, new_h, NO_INIT);
  Image<int>::iterator dptr = result.beginw();
  Image<int>::const_iterator const sptr = src.begin();

  // code inspired by one of the Graphics Gems books:
  /*
    (1) (x,y) are the original coords corresponding to scaled coords (i,j)
    (2) (x0,y0) are the greatest lower bound integral coords from (x,y)
    (3) (x1,y1) are the least upper bound integral coords from (x,y)
    (4) d00, d10, d01, d11 are the values of the original image at the corners
        of the rect (x0,y0),(x1,y1)
    (5) the value in the scaled image is computed from bilinear interpolation
        among d00,d10,d01,d11
  */
  for (int j = 0; j < new_h; ++j)
    {
      const int y_numer = std::max(0, j*2*orig_h+orig_h-new_h);
      const int y_denom = 2*new_h;

      const int y0 = y_numer / y_denom;
      const int y1 = std::min(y0 + 1, orig_h - 1);

      const int fy_numer = y_numer - y0 * y_denom;
      const int fy_denom = y_denom;
      ASSERT(fy_numer == (y_numer % y_denom));

      const int wy0 = orig_w * y0;
      const int wy1 = orig_w * y1;

      for (int i = 0; i < new_w; ++i)
        {
          const int x_numer = std::max(0, i*2*orig_w+orig_w-new_w);
          const int x_denom = 2*new_w;

          const int x0 = x_numer / x_denom;
          const int x1 = std::min(x0 + 1, orig_w - 1);

          const int fx_numer = x_numer - x0 * x_denom;
          const int fx_denom = x_denom;
          ASSERT(fx_numer == (x_numer % x_denom));

          const int
            d00( sptr[x0 + wy0] ), d10( sptr[x1 + wy0] ),
            d01( sptr[x0 + wy1] ), d11( sptr[x1 + wy1] ),
            dx0( d00 + ((d10 - d00) / fx_denom) * fx_numer ),
            dx1( d01 + ((d11 - d01) / fx_denom) * fx_numer );

          *dptr++ = dx0 + ((dx1 - dx0) / fy_denom) * fy_numer; // no need to clamp
        }
    }
  return result;
}
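
// A concrete instance of the coordinate math above: with orig_w == 4,
// new_w == 2 and i == 0, x_numer = max(0, 0 + 4 - 2) == 2 and
// x_denom == 4, so x0 == 0 with fractional weight 2/4 -- i.e. the
// first output pixel samples halfway between input pixels 0 and 1,
// the usual "pixel centers" convention for 2x downsampling.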

// ######################################################################
void intgInplaceAddBGnoise(Image<int>& src, int max)
{
  const int range = max / 100000;

  ASSERT(src.initialized());
  const int w = src.getWidth(), h = src.getHeight();

  // do not put noise very close to image borders:
  int siz = std::min(w, h) / 10;

  Image<int>::iterator sptr = src.beginw() + siz + siz * w;

  static rutz::urand rnd;

  for (int j = siz; j < h - siz; ++j)
    {
      for (int i = siz; i < w - siz; ++i)
        *sptr++ += rnd.idraw(range);
      sptr += siz + siz;
    }
}

// ######################################################################
Image<int> intgMaxNormalize(const Image<int>& src,
                            const int mi, const int ma, const MaxNormType normtyp)
{
  Image<int> result;

  // do normalization depending on desired type:
  switch(normtyp)
    {
    case VCXNORM_NONE:
      result = intgMaxNormalizeNone(src, mi, ma);
      break;
    case VCXNORM_MAXNORM:
      result = intgMaxNormalizeStd(src, mi, ma);
      break;
      //    case VCXNORM_FANCYFAST:
      //      result = maxNormalizeFancyFast(src, mi, ma, nbiter);
      //      break;
      //    case VCXNORM_FANCYONE:
      //      nbiter = 1;
      //    case VCXNORM_FANCY:
      //      result = maxNormalizeFancy(src, mi, ma, nbiter, 1.0, lrexcit);
      //      break;
      //    case VCXNORM_FANCYLANDMARK:
      //      result = maxNormalizeFancyLandmark(src, mi, ma, nbiter);
      //      break;
      //    case VCXNORM_LANDMARK:
      //      result = maxNormalizeLandmark(src, mi, ma);
      //      break;
      //    case VCXNORM_FANCYWEAK:
      //      result = maxNormalizeFancy(src, mi, ma, nbiter, 0.5);
      //      break;
      //    case VCXNORM_FANCYVWEAK:
      //      result = maxNormalizeFancy(src, mi, ma, nbiter, 0.1);
      //      break;
      //    case VCXNORM_IGNORE:
      //      result = src;
      //      break;
      //    case VCXNORM_SURPRISE:
      //      result = src;
      //      break;
    default:
      LFATAL("Unknown normalization type: %d", int(normtyp));
    }

  return result;
}

// ######################################################################
Image<int> intgMaxNormalizeNone(const Image<int>& src,
                                const int nmi, const int nma)
{
  Image<int> result = src;

  // first clamp negative values to zero
  inplaceRectify(result);

  // then, normalize between mi and ma if not zero
  int mi = nmi;
  int ma = nma;
  if (mi != 0 || ma != 0)
    intgInplaceNormalize(result, nmi, nma, &mi, &ma);

  return result;
}
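
// The "standard" maxnorm implemented below follows the weighting of
// Itti, Koch & Niebur (1998): after rescaling to [mi..ma], a map is
// globally multiplied by (ma - m)^2 / ma, where m is the mean of its
// local maxima -- a map with one strong isolated peak keeps its
// weight, while a map with many comparable peaks is suppressed. For
// instance, with ma == 255 and m == 200 the factor is 55*55/255 == 11
// (integer division), whereas m == 250 gives 25/255 == 0, wiping the
// map out entirely by integer truncation.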

// ######################################################################
Image<int> intgMaxNormalizeStd(const Image<int>& src,
                               const int nmi, const int nma)
{
  ASSERT(src.initialized());

  Image<int> result = src;

  // first clamp negative values to zero
  inplaceRectify(result);

  // then, normalize between mi and ma if not zero
  int mi = nmi;
  int ma = nma;
  if (nmi != 0 || nma != 0)
    intgInplaceNormalize(result, nmi, nma, &mi, &ma);

  const int w = result.getWidth();
  const int h = result.getHeight();

  // normalize between mi and ma and multiply by (max - mean)^2

  // we want to quickly detect local maxes, while avoiding local mins
  const int thresh = mi + (ma - mi) / 10;

  // then get the mean value of the local maxima:
  std::vector<int> vals;
  Image<int>::const_iterator const dptr = result.begin();
  for (int j = 1; j < h - 1; ++j)
    for (int i = 1; i < w - 1; ++i)
      {
        const int index = i + w * j;
        const int val = dptr[index];
        if (val >= thresh &&
            val >= dptr[index - w] &&
            val >= dptr[index + w] &&
            val >= dptr[index - 1] &&
            val >= dptr[index + 1]) // local max
          {
            vals.push_back(val);
          }
      }

  int lm_mean = 0;
  for (size_t i = 0; i < vals.size(); ++i)
    lm_mean += (vals[i] / vals.size());

  ASSERT(ma >= lm_mean);

  // scale factor is (max - mean_local_max)^2:
  if (vals.size() > 1)
    {
      // make sure that (ma - lm_mean)^2 won't overflow:
      ASSERT((ma == lm_mean) ||
             ((std::numeric_limits<int>::max() / (ma - lm_mean))
              > (ma - lm_mean)));

      const int factor = ((ma - lm_mean) * (ma - lm_mean)) / ma;
      result *= factor;
    }
  else if (vals.size() == 1) // a single narrow peak
    {
      const int factor = ma;
      result *= factor;
    }
  else
    {
      /* LERROR("No local maxes found !!"); */
    }

  return result;
}

// ######################################################################
Image<int> intgCenterSurround(const ImageSet<int>& pyr,
                              int lev1, int lev2, bool absol,
                              const ImageSet<int>* clipPyr)
{
  ASSERT(lev1 >= 0 && lev2 >= 0);
  ASSERT(uint(lev1) < pyr.size() && uint(lev2) < pyr.size());

  const int largeLev = std::min(lev1, lev2);
  const int smallLev = std::max(lev1, lev2);

  if (clipPyr != 0 && clipPyr->isNonEmpty())
    {
      LFATAL("clipping pyramids not supported");

      ASSERT((*clipPyr)[largeLev].getDims() == pyr[largeLev].getDims());
      ASSERT((*clipPyr)[smallLev].getDims() == pyr[smallLev].getDims());

      // FIXME: this will overflow:
      return
        intgCenterSurround(Image<int>(pyr[largeLev] * (*clipPyr)[largeLev]),
                           Image<int>(pyr[smallLev] * (*clipPyr)[smallLev]),
                           absol);
    }
  else
    return intgCenterSurround(pyr[largeLev], pyr[smallLev], absol);
}
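
// Note that center-surround works across pyramid scales: the "center"
// is the finer map (smaller level number, hence the larger image) and
// the "surround" the coarser one, e.g. lev1 == 2 versus lev2 == 5 in
// the classic saliency model. The per-pixel subtraction itself is done
// by the two-image overload of intgCenterSurround() further below.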

// ######################################################################
void intgDoLowThresh(ImageSet<int>& x, int threshold, int newval)
{
  for (uint i = 0; i < x.size(); ++i)
    inplaceLowThresh(x[i], threshold, newval);
}

// ######################################################################
void intgDoLowThreshAbs(ImageSet<int>& x, int threshold, int newval)
{
  for (uint i = 0; i < x.size(); ++i)
    inplaceLowThreshAbs(x[i], threshold, newval);
}

// ######################################################################
void intgDoRectify(ImageSet<int>& x)
{
  for (uint i = 0; i < x.size(); ++i)
    inplaceRectify(x[i]);
}

// ######################################################################
void intgInplaceNormalize(Image<int>& dst, const int nmin, const int nmax,
                          int* actualmin_p, int* actualmax_p)
{
  ASSERT(dst.initialized());
  ASSERT(nmax >= nmin);

  int mi, ma; getMinMax(dst, mi, ma);
  const int scale = ma - mi;
  if (scale == 0) { dst.clear(0); return; } // input image is uniform
  const int nscale = nmax - nmin;

  if (nscale == 0) { dst.clear(nmin); return; } // output range is uniform

  Image<int>::iterator aptr = dst.beginw();
  Image<int>::iterator stop = dst.endw();

  int actualmin, actualmax;

  if (scale == nscale)
    {
      const int add = nmin - mi;
      while (aptr != stop)
        {
          *aptr += add;
          ++aptr;
        }

      actualmin = nmin;
      actualmax = nmax;
    }
  else if (scale > nscale)
    {
      const int div = scale / nscale;

      while (aptr != stop)
        {
          *aptr = nmin + ((*aptr - mi) / div);
          ++aptr;
        }

      // FIXME sometimes we will end up with actualmax>nmax, which
      // is not really what the user wants; yet, we can't do arbitrary
      // precision arithmetic without overflowing -- maybe we can find
      // a rational number with small numerator and denominator that
      // approximates nscale/scale without exceeding it?

      actualmin = nmin;
      actualmax = nmin + (scale / div);
    }
  else // (scale < nscale)
    {
      const int mul = nscale / scale;
      while (aptr != stop)
        {
          *aptr = nmin + ((*aptr - mi) * mul);
          ++aptr;
        }

      actualmin = nmin;
      actualmax = nmin + (scale * mul);
    }

  // Don't assign to the pointers until the very end, in case the user
  // passes pointers to nmin,nmax as the actualmin/actualmax pointers
  ASSERT(actualmin_p != NULL);
  ASSERT(actualmax_p != NULL);
  *actualmin_p = actualmin;
  *actualmax_p = actualmax;
}
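
// A worked instance of the rounding caveat flagged in the FIXME
// above: normalizing from scale == 1000 down to nscale == 255 gives
// div == 1000/255 == 3, so actualmax == nmin + 1000/3 == nmin + 333,
// overshooting the requested nmax by 78 -- the price of staying in
// pure integer arithmetic.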

// ######################################################################
Image<int> intgCenterSurround(const Image<int>& center,
                              const Image<int>& surround,
                              const bool absol)
{
  const int lw = center.getWidth(), lh = center.getHeight();
  const int sw = surround.getWidth(), sh = surround.getHeight();

  if (sw > lw || sh > lh) LFATAL("center must be larger than surround");

  int scalex = lw / sw, remx = lw - 1 - (lw % sw);
  int scaley = lh / sh, remy = lh - 1 - (lh % sh);

  // result has the size of the larger image:
  Image<int> result(center.getDims(), NO_INIT);

  // scan large image and subtract corresponding pixel from small image:
  int ci = 0, cj = 0;
  Image<int>::const_iterator lptr = center.begin();
  Image<int>::const_iterator sptr = surround.begin();
  Image<int>::iterator dptr = result.beginw();

  if (absol == true) // compute abs(hires - lowres):
    {
      for (int j = 0; j < lh; ++j)
        {
          for (int i = 0; i < lw; ++i)
            {
              if (*lptr > *sptr)
                *dptr++ = (*lptr++ - *sptr);
              else
                *dptr++ = (*sptr - *lptr++);

              if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
            }
          if (ci) { ci = 0; ++sptr; } // in case the reduction is not round
          if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
        }
    }
  else // compute hires - lowres, clamped to 0:
    {
      for (int j = 0; j < lh; ++j)
        {
          for (int i = 0; i < lw; ++i)
            {
              if (*lptr > *sptr)
                *dptr++ = (*lptr++ - *sptr);
              else
                { *dptr++ = 0; lptr++; }

              if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
            }
          if (ci) { ci = 0; ++sptr; } // in case the reduction is not round
          if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
        }
    }

  // attenuate borders:
  intgInplaceAttenuateBorders(result, result.getDims().max() / 20);

  return result;
}

// ######################################################################
void intgGetRGBY(const Image<PixRGB<byte> >& src,
                 Image<int>& rg,
                 Image<int>& by, const int threshfactor,
                 const uint inputbits)
{
  // red = [r - (g+b)/2]        [.] = clamp between 0 and 255
  // green = [g - (r+b)/2]
  // blue = [b - (r+g)/2]
  // yellow = [2*((r+g)/2 - |r-g| - b)]

  ASSERT(src.initialized());

  rg = Image<int>(src.getDims(), NO_INIT);
  by = Image<int>(src.getDims(), NO_INIT);

  Image<PixRGB<byte> >::const_iterator aptr = src.begin();
  Image<PixRGB<byte> >::const_iterator stop = src.end();

  Image<int>::iterator rgptr = rg.beginw(), byptr = by.beginw();

  const int bitsaftershift = 8*sizeof(int) - 2; // e.g. 30
  const int upshift = bitsaftershift - 14;      // e.g. 16
  const int bitsafterdiv = bitsaftershift - 10; // e.g. 20
  const int finalshift = inputbits - bitsafterdiv;

  ASSERT(upshift > 0);

  while (aptr != stop)
    {
      const int r = aptr->p[0]; // 8 bits
      const int g = aptr->p[1];
      const int b = aptr->p[2];
      ++aptr;

      if (threshfactor*(r+g+b) < 3*255) // too dark... no response from anybody
        { *rgptr++ = 0; *byptr++ = 0; }
      else
        {
          // first do the luminanceNormalization:
          const int lum = r + g + b; // range up to 10 bits

          // now compute color opponencies:
          int red = 2*r - g - b; // range up to 10 bits
          int green = 2*g - r - b;
          int blue = 2*b - r - g;
          int yellow = -2*blue - 4*abs(r-g); // range up to 11 bits

          if (red < 0) red = 0;
          if (green < 0) green = 0;
          if (blue < 0) blue = 0;
          if (yellow < 0) yellow = 0;

          // compute differences and normalize chroma by luminance:

          // bit counts: the (red-green) and (blue-yellow) differences
          // can range up to 12 bits, and after the multiplication by
          // 3 can range up to 14 bits, thus the use of 14 in
          // computing the upshift value above

          if (finalshift > 0)
            {
              *rgptr++ = ((3*(red - green) << upshift) / lum) << finalshift;
              *byptr++ = ((3*(blue - yellow) << upshift) / lum) << finalshift;
            }
          else if (finalshift < 0)
            {
              *rgptr++ = ((3*(red - green) << upshift) / lum) >> (-finalshift);
              *byptr++ = ((3*(blue - yellow) << upshift) / lum) >> (-finalshift);
            }
          else
            {
              *rgptr++ = (3*(red - green) << upshift) / lum;
              *byptr++ = (3*(blue - yellow) << upshift) / lum;
            }
        }
    }
}
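
// Bit bookkeeping for intgGetRGBY() with 32-bit int: bitsaftershift
// == 30, so upshift == 16 and bitsafterdiv == 20. The 14-bit color
// difference is shifted up by 16 to fill 30 bits before the luminance
// division (which costs up to 10 bits), leaving a 20-bit result;
// finalshift == inputbits - 20 then moves that to the caller's
// requested bit depth, e.g. up by 10 for inputbits == 30 or down by 4
// for inputbits == 16.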

// ######################################################################
Image<int> intgShiftImage(const Image<int>& srcImg,
                          const int dxnumer, const int dynumer,
                          const uint denombits)
{
  // make sure the source image is valid
  ASSERT(srcImg.initialized());

  ASSERT(denombits < 8*sizeof(int));

  const int denom = (1 << denombits);

  // create and clear the return image
  Dims dim(srcImg.getDims());
  int w = dim.w(), h = dim.h();
  Image<int> retImg(dim, ZEROS);

  // prepare a couple of variables for the x direction
  int xt = dxnumer >= 0
    ? (dxnumer >> denombits)
    : - ((-dxnumer + denom-1) >> denombits);
  int xfrac_numer = dxnumer - (xt << denombits);
  int startx = std::max(0,xt);
  int endx = std::min(0,xt) + w;
  if (xfrac_numer != 0 && abs(denom)/abs(xfrac_numer) > (1 << 30))
    xfrac_numer = 0;
  else endx--;

  // prepare a couple of variables for the y direction
  int yt = dynumer >= 0
    ? (dynumer >> denombits)
    : - ((-dynumer + denom-1) >> denombits);
  int yfrac_numer = dynumer - (yt << denombits);
  int starty = std::max(0,yt);
  int endy = std::min(0,yt) + h;
  if (yfrac_numer != 0 && abs(denom)/abs(yfrac_numer) > (1 << 30))
    yfrac_numer = 0;
  else endy--;

  // dispatch to faster shiftClean() if displacements are roughly
  // integer:
  if (xfrac_numer == 0 && yfrac_numer == 0)
    return shiftClean(srcImg, xt, yt);

  if (xfrac_numer > 0)
    {
      xfrac_numer = denom - xfrac_numer;
      xt++;
    }

  if (yfrac_numer > 0)
    {
      yfrac_numer = denom - yfrac_numer;
      yt++;
    }

  // prepare the pointers
  Image<int>::const_iterator src, src2 = srcImg.begin();
  Image<int>::iterator ret, ret2 = retImg.beginw();
  if (xt > 0) ret2 += xt;
  if (xt < 0) src2 -= xt;
  if (yt > 0) ret2 += yt * w;
  if (yt < 0) src2 -= yt * w;

  // now loop over the images
  for (int y = starty; y < endy; ++y)
    {
      src = src2; ret = ret2;
      for (int x = startx; x < endx; ++x)
        {
          *ret = (((src[0] >> denombits) * (denom - xfrac_numer)) >> denombits) * (denom - yfrac_numer);
          *ret += (((src[1] >> denombits) * xfrac_numer) >> denombits) * (denom - yfrac_numer);
          *ret += (((src[w] >> denombits) * (denom - xfrac_numer)) >> denombits) * yfrac_numer;
          *ret += (((src[w+1] >> denombits) * xfrac_numer) >> denombits) * yfrac_numer;
          ++src; ++ret;
        }
      src2 += w; ret2 += w;
    }
  return retImg;
}
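
// Usage sketch for intgShiftImage(): displacements are rational,
// dx == dxnumer / 2^denombits, so e.g. intgShiftImage(img, 3, 0, 1)
// shifts right by 1.5 pixels, bilinearly blending horizontal
// neighbors; integral displacements (zero fractional part) are
// detected above and dispatched to the cheaper shiftClean().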

// ######################################################################
// ##### IntgGaussianPyrBuilder Functions:
// ######################################################################

IntgGaussianPyrBuilder::IntgGaussianPyrBuilder(const int filter_size,
                                               const integer_math* imath) :
  PyrBuilder<int>(),
  itsFiltSize(filter_size),
  itsImath(imath)
{}

ImageSet<int> IntgGaussianPyrBuilder::build(const Image<int>& img,
                                            const int firstlevel,
                                            const int depth,
                                            PyramidCache<int>* cache)
{
  ASSERT(cache != 0); // FIXME remove this

  const ImageSet<int>* const cached =
    (cache != 0 && itsFiltSize == 5)
    ? cache->gaussian5.get(img) // may be null if there is no cached pyramid
    : 0;

  return (cached != 0)
    ? *cached
    : intgBuildPyrGaussian(img, depth, itsFiltSize, itsImath);
}

IntgGaussianPyrBuilder* IntgGaussianPyrBuilder::clone() const
{
  return new IntgGaussianPyrBuilder(*this);
}

// ######################################################################
// ##### IntgOrientedPyrBuilder Functions:
// ######################################################################

IntgOrientedPyrBuilder::IntgOrientedPyrBuilder(const int filter_size,
                                               const float theta,
                                               const integer_math* imath) :
  PyrBuilder<int>(),
  itsFiltSize(filter_size),
  itsAngle(theta),
  itsImath(imath)
{}

ImageSet<int> IntgOrientedPyrBuilder::build(const Image<int>& img,
                                            const int firstlevel,
                                            const int depth,
                                            PyramidCache<int>* cache)
{
  ASSERT(cache != 0); // FIXME remove this

  const ImageSet<int>* const lplc =
    (cache != 0 && itsFiltSize == 9)
    ? cache->laplacian9.get(img) // may be null if there is no cached pyramid
    : 0;

  return lplc != 0
    ? intgBuildPyrOrientedFromLaplacian(*lplc, itsFiltSize, itsAngle,
                                        itsImath)
    : intgBuildPyrOriented(img, firstlevel, depth,
                           itsFiltSize, itsAngle, itsImath);
}

IntgOrientedPyrBuilder* IntgOrientedPyrBuilder::clone() const
{
  return new IntgOrientedPyrBuilder(*this);
}

// ######################################################################
// ##### IntgReichardtPyrBuilder Functions:
// ######################################################################
IntgReichardtPyrBuilder::
IntgReichardtPyrBuilder(const int dxnumer, const int dynumer,
                        const uint denombits,
                        const integer_math* imath) :
  PyrBuilder<int>(),
  itsDXnumer(dxnumer), itsDYnumer(dynumer), itsDenomBits(denombits),
  itsImath(imath)
{}

ImageSet<int> IntgReichardtPyrBuilder::build(const Image<int>& image,
                                             const int firstlevel,
                                             const int depth,
                                             PyramidCache<int>* cache)
{
  const ImageSet<int>* const cached =
    cache != 0
    ? cache->gaussian5.get(image) // may be null if there is no cached pyramid
    : 0;

  // create a pyramid with the input image
  ImageSet<int> upyr =
    cached != 0
    ? *cached
    : intgBuildPyrGaussian(image, depth, 5, itsImath);

  // create an empty pyramid
  ImageSet<int> spyr(depth);

  // fill the empty pyramid with the shifted version
  for (int i = firstlevel; i < depth; ++i)
    spyr[i] = intgShiftImage(upyr[i], itsDXnumer, itsDYnumer,
                             itsDenomBits);

  // store both pyramids in the deques
  unshifted.push_back(upyr);
  shifted.push_back(spyr);

  ImageSet<int> result(depth);

  // so, it's our first time? Pretend the pyramid before this was
  // the same as the current one ...
  if (unshifted.size() == 1)
    {
      unshifted.push_back(upyr);
      shifted.push_back(spyr);
    }
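
  // (A note on the arithmetic below, stated informally: this is a
  // correlation-type (Reichardt) motion detector. With "back" being
  // the current frame and "front" the previous one, each output pixel
  // is current*shift(previous) - previous*shift(current), so image
  // structure moving with the (dx,dy) shift between the two frames
  // and structure moving against it yield responses of opposite sign,
  // while static structure cancels.)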

  // need to pop off old pyramid?
  if (unshifted.size() == 3)
    {
      unshifted.pop_front();
      shifted.pop_front();
    }

  // compute the Reichardt maps
  for (int i = firstlevel; i < depth; i++)
    {
      result[i] = Image<int>(unshifted.back()[i].getDims(), NO_INIT);
      const int sz = result[i].getSize();

      Image<int>::iterator rptr = result[i].beginw();
      Image<int>::const_iterator ubptr = unshifted.back()[i].begin();
      Image<int>::const_iterator ufptr = unshifted.front()[i].begin();
      Image<int>::const_iterator sbptr = shifted.back()[i].begin();
      Image<int>::const_iterator sfptr = shifted.front()[i].begin();

      // scale bit depth down to avoid overflow when we multiply
      const uint nshift = (itsImath->nbits+1)/2;

      for (int j = 0; j < sz; ++j)
        rptr[j] =
          ((ubptr[j] >> nshift) * (sfptr[j] >> nshift)) -
          ((ufptr[j] >> nshift) * (sbptr[j] >> nshift));
    }

  return result;
}

// ######################################################################
IntgReichardtPyrBuilder* IntgReichardtPyrBuilder::clone() const
{
  return new IntgReichardtPyrBuilder(*this);
}

// ######################################################################
void IntgReichardtPyrBuilder::reset()
{
  shifted.clear();
  unshifted.clear();
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* mode: c++ */
/* indent-tabs-mode: nil */
/* End: */

#endif // IMAGE_INTEGERMATHOPS_C_DEFINED