MatrixOps.C

Go to the documentation of this file.
00001 /*!@file Image/MatrixOps.C Matrix operations on Image
00002  */
00003 
00004 // //////////////////////////////////////////////////////////////////// //
00005 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
00006 // University of Southern California (USC) and the iLab at USC.         //
00007 // See http://iLab.usc.edu for information about this project.          //
00008 // //////////////////////////////////////////////////////////////////// //
00009 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00010 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00011 // in Visual Environments, and Applications'' by Christof Koch and      //
00012 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00013 // pending; application number 09/912,225 filed July 23, 2001; see      //
00014 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00015 // //////////////////////////////////////////////////////////////////// //
00016 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00017 //                                                                      //
00018 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00019 // redistribute it and/or modify it under the terms of the GNU General  //
00020 // Public License as published by the Free Software Foundation; either  //
00021 // version 2 of the License, or (at your option) any later version.     //
00022 //                                                                      //
00023 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00024 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00025 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00026 // PURPOSE.  See the GNU General Public License for more details.       //
00027 //                                                                      //
00028 // You should have received a copy of the GNU General Public License    //
00029 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00030 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00031 // Boston, MA 02111-1307 USA.                                           //
00032 // //////////////////////////////////////////////////////////////////// //
00033 //
00034 // Primary maintainer for this file: Laurent Itti <itti@usc.edu>
00035 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Image/MatrixOps.C $
00036 // $Id: MatrixOps.C 14720 2011-04-12 19:58:53Z itti $
00037 
00038 #include "Image/MatrixOps.H"
00039 
00040 // WARNING: Try not include any other "Image*Ops.H" headers here -- if
00041 // you find that you are needing such a header, then the function
00042 // you're writing probably belongs outside Image_MatrixOps.C.
00043 #include "Image/Image.H"
00044 #include "Image/Pixels.H"
00045 #include "Image/lapack.H" // for BLAS and LAPACK decls (e.g. dgemm_())
00046 #include "Util/Assert.H"
00047 #include "Util/MathFunctions.H"
00048 #include "rutz/trace.h"
00049 
00050 namespace
00051 {
00052   // #################################################################
00053   // hand-rolled implementation of matrix-matrix multiplication
00054   template <class T>
00055   Image<typename promote_trait<T,T>::TP>
00056   basic_vm_mul(const Image<T>& v, const Image<T>& M)
00057   {
00058   GVX_TRACE(__PRETTY_FUNCTION__);
00059 
00060     typedef typename promote_trait<T,T>::TP TF;
00061 
00062     const int wv = v.getWidth(), hv = v.getHeight();
00063     const int wm = M.getWidth(), hm = M.getHeight();
00064 
00065     ASSERT(wv == hm);
00066     ASSERT(hv == 1);
00067 
00068     Image<TF> y(wm, hv /* == 1 */, ZEROS);
00069 
00070     // for efficiency, use pointers to the input matrices' raw storage
00071     const T* const vptr = v.getArrayPtr();
00072     const T* const mptr = M.getArrayPtr();
00073     TF*      const yptr = y.getArrayPtr();
00074 
00075     for (int xv = 0; xv < wv; ++xv)
00076       {
00077         const T* const mrow = mptr + xv*wm;
00078 
00079         const T vval = vptr[xv];
00080         if (vval != 0.0)
00081           for (int xm = 0; xm < wm; ++xm)
00082             yptr[xm] += vval * mrow[xm];
00083       }
00084 
00085     return y;
00086   }
00087 
00088   // #################################################################
00089   // hand-rolled implementation of matrix-matrix multiplication
00090   template <class T>
00091   Image<typename promote_trait<T,T>::TP>
00092   basic_mm_mul(const Image<T>& A, const Image<T>& B)
00093   {
00094   GVX_TRACE(__PRETTY_FUNCTION__);
00095 
00096     typedef typename promote_trait<T,T>::TP TF;
00097 
00098     const int wa = A.getWidth(), ha = A.getHeight();
00099     const int wb = B.getWidth(), hb = B.getHeight();
00100     ASSERT(wa == hb);
00101 
00102     Image<TF> C(wb, ha, ZEROS);
00103 
00104     // for efficiency, use pointers to the input matrices' raw storage
00105     const T* const aptr = A.getArrayPtr();
00106     const T* const bptr = B.getArrayPtr();
00107     TF*      const cptr = C.getArrayPtr();
00108 
00109     /*
00110       NOTE: in principle, we are doing the following loop
00111 
00112       for (int xa = 0; xa < wa; ++xa)
00113       for (int yc = 0; yc < ha; ++yc)
00114       for (int xc = 0; xc < wb; ++xc)
00115       cptr[xc + yc*wb] += aptr[xa + yc*wa] * bptr[xc + xa*wb];
00116 
00117       and in principle, we could put the individual for-loops in any
00118       order we want, since the inner loop computation is the same.
00119 
00120       However, in practice, there is a significant cpu efficiency
00121       advantage to be gained by ordering the loops as done below, with
00122       the 'xc' loop as the inner loop. That way, we access both the B
00123       and C arrays with a stride of 1 in the inner loop. With a
00124       different ordering, we would be accessing only one of the three
00125       arrays with stride 1 in the inner loop, and this hurts
00126       performance because it pessimizes memory cache retrievals.
00127     */
00128 
00129     for (int xa = 0; xa < wa; ++xa)
00130       {
00131         const T* const bpos = bptr + xa*wb;
00132 
00133         for (int yc = 0; yc < ha; ++yc)
00134           {
00135             const T aval = aptr[xa + yc*wa];
00136             TF* const cpos = cptr + yc*wb;
00137             if (aval != 0.0)
00138               for (int xc = 0; xc < wb; ++xc)
00139                 cpos[xc] += aval * bpos[xc];
00140           }
00141       }
00142 
00143     return C;
00144   }
00145 
00146 }
00147 
// ######################################################################
//! Vector-matrix product: v (1 x N row vector) times M (N x W matrix).
/*! Generic version dispatching to the hand-rolled implementation; for
    double and float, the specializations below use BLAS gemv instead
    when LAPACK support is compiled in. */
template <class T>
Image<typename promote_trait<T,T>::TP>
vmMult(const Image<T>& v, const Image<T>& M)
{
  return basic_vm_mul(v, M);
}
00155 
00156 #ifdef HAVE_LAPACK
00157 
00158 // specializations to use lapack functions if available
00159 
//! vmMult specialization for double: forward to BLAS dgemv via the
//! lapack wrapper (see Image/lapack.H).
template <>
Image<double>
vmMult(const Image<double>& v, const Image<double>& M)
{
  return lapack::dgemv(&v, &M);
}
00166 
//! vmMult specialization for float: forward to BLAS sgemv via the
//! lapack wrapper (see Image/lapack.H).
template <>
Image<float>
vmMult(const Image<float>& v, const Image<float>& M)
{
  return lapack::sgemv(&v, &M);
}
00173 
00174 #endif // HAVE_LAPACK
00175 
// ######################################################################
//! Matrix-matrix product A * B in the promoted type.
/*! Generic version dispatching to the hand-rolled implementation; for
    double and float, the specializations below use BLAS gemm instead
    when LAPACK support is compiled in. */
template <class T>
Image<typename promote_trait<T,T>::TP>
matrixMult(const Image<T>& A, const Image<T>& B)
{
  return basic_mm_mul(A, B);
}
00183 
00184 #ifdef HAVE_LAPACK
00185 
00186 // specializations to use lapack functions if available
00187 
//! matrixMult specialization for double: forward to BLAS dgemm via
//! the lapack wrapper (see Image/lapack.H).
template <>
Image<double>
matrixMult(const Image<double>& A, const Image<double>& B)
{
  return lapack::dgemm(&A, &B);
}
00194 
//! matrixMult specialization for float: forward to BLAS sgemm via
//! the lapack wrapper (see Image/lapack.H).
template <>
Image<float>
matrixMult(const Image<float>& A, const Image<float>& B)
{
  return lapack::sgemm(&A, &B);
}
00201 
00202 #endif // HAVE_LAPACK
00203 
// ######################################################################
//! Multiply a sub-rectangle of A with a sub-rectangle of B.
/*! The region of A spanned by columns [beginAX..endAX) and rows
    [beginAY..endAY) is multiplied with the region of B spanned by
    columns [beginBX..endBX), B's rows being consumed from row 0
    upward. The result has size (endBX-beginBX) x (endAY-beginAY).

    NOTE(review): the loops below treat every end index as EXCLUSIVE
    (xa < endAX, etc.), so only endAX - beginAX columns of A are
    consumed -- yet the ASSERT on B's height requires
    endAX - beginAX + 1 rows (an inclusive count). These two
    conventions disagree by one; confirm the intended convention
    against the call sites before relying on the last row/column. */
template <class T>
Image<typename promote_trait<T,T>::TP>
matrixMult(const Image<T>& A, const Image<T>& B,
           const uint beginAX, const uint endAX,
           const uint beginBX, const uint endBX,
           const uint beginAY, const uint endAY)
{
GVX_TRACE(__PRETTY_FUNCTION__);
  ASSERT(A.initialized()); ASSERT(B.initialized());
  ASSERT(beginAX <= endAX); ASSERT(beginBX <= endBX);
  ASSERT(beginAY <= endAY);
  const uint hb = B.getHeight(); ASSERT(hb == endAX - beginAX + 1);
  typedef typename promote_trait<T,T>::TP TF;

  Image<TF> C(endBX - beginBX, endAY - beginAY, ZEROS);
  // pc walks the result image in raster order, in lockstep with the
  // (yc, xc) loop nest below:
  typename Image<TF>::iterator pc = C.beginw();
  uint ya;

  for (uint yc = beginAY; yc < endAY; yc++)
    for (uint xc = beginBX; xc < endBX; xc++, ++pc)
      {
        ya = 0; // row of B paired with column xa of A
        for (uint xa = beginAX; xa < endAX; xa++ , ya++)
          *pc += A.getVal(xa, yc) * B.getVal(xc, ya);
      }
  return C;
}
00232 
00233 // ######################################################################
00234 template <class T_or_RGB>
00235 Image<T_or_RGB> transpose(const Image<T_or_RGB>& M)
00236 {
00237 GVX_TRACE(__PRETTY_FUNCTION__);
00238   const int w = M.getWidth(), h = M.getHeight();
00239   Image<T_or_RGB> result(h, w, NO_INIT);
00240   typename Image<T_or_RGB>::const_iterator mptr, mptr2 = M.begin();
00241   typename Image<T_or_RGB>::iterator rptr = result.beginw();
00242 
00243   for (int y = 0; y < w; ++y)
00244     {
00245       mptr = mptr2;
00246       for (int x = 0; x < h; ++x)
00247         {
00248           *rptr = *mptr;
00249           ++rptr; mptr += w;
00250         }
00251       ++mptr2;
00252     }
00253 
00254   return result;
00255 }
00256 
00257 // ######################################################################
00258 template <class T_or_RGB>
00259 Image<T_or_RGB> flipHoriz(const Image<T_or_RGB>& img)
00260 {
00261 GVX_TRACE(__PRETTY_FUNCTION__);
00262   const int w = img.getWidth(), h = img.getHeight();
00263   Image<T_or_RGB> result(w, h, NO_INIT);
00264 
00265   typename Image<T_or_RGB>::const_iterator sptr = img.begin();
00266   typename Image<T_or_RGB>::iterator dptr = result.beginw() + w - 1;
00267 
00268   for (int y = 0; y < h; ++y)
00269     {
00270       for (int x = 0; x < w; ++x) *dptr-- = *sptr++;
00271       dptr += 2 * w;
00272     }
00273 
00274   return result;
00275 }
00276 
00277 // ######################################################################
00278 template <class T_or_RGB>
00279 Image<T_or_RGB> flipVertic(const Image<T_or_RGB>& img)
00280 {
00281 GVX_TRACE(__PRETTY_FUNCTION__);
00282   const int w = img.getWidth(), h = img.getHeight();
00283   Image<T_or_RGB> result(w, h, NO_INIT);
00284 
00285   typename Image<T_or_RGB>::const_iterator sptr = img.begin();
00286   typename Image<T_or_RGB>::iterator dptr = result.beginw() + w * (h-1);
00287 
00288   for (int y = 0; y < h; ++y)
00289     {
00290       // NOTE: we may revert to a more object-safe implementation if
00291       // the need arises...
00292       memcpy(dptr, sptr, w * sizeof(T_or_RGB));
00293       sptr += w;  dptr -= w;
00294     }
00295 
00296   return result;
00297 }
00298 
00299 // ######################################################################
00300 template <class T>
00301 Image<T> eye(const uint size)
00302 {
00303 GVX_TRACE(__PRETTY_FUNCTION__);
00304   Image<T> result(size, size, ZEROS);
00305   typename Image<T>::iterator rptr = result.beginw();
00306   T one(1);
00307 
00308   for (uint i = 0; i < size; ++i)
00309     {
00310       *rptr++ = one;
00311       rptr += size;
00312     }
00313 
00314   return result;
00315 }
00316 
00317 // ######################################################################
00318 template <class T>
00319 typename promote_trait<T,T>::TP trace(const Image<T>& M)
00320 {
00321 GVX_TRACE(__PRETTY_FUNCTION__);
00322   ASSERT(M.isSquare());
00323   typename promote_trait<T,T>::TP result(0);
00324   typename Image<T>::const_iterator ptr = M.begin();
00325   const uint size = M.getWidth();
00326 
00327   for (uint i = 0; i < size; ++i)
00328     {
00329       result += *ptr++;
00330       ptr += size;
00331     }
00332 
00333   return result;
00334 }
00335 
// ######################################################################
//! Partial-pivoting helper: bring the best pivot of column y up to row y.
/*! Searches column y of the square matrix M, from row y downward, for
    the element with the largest absolute value. If every candidate is
    zero, returns -1 and leaves M untouched. Otherwise rows y and bestj
    are exchanged in place, and the index of the row that supplied the
    pivot is returned -- which equals y when no exchange was needed.
    Callers (matrixInv, matrixDet) rely on this return contract. */
template <class T>
int matrixPivot(Image<T>& M, const int y)
{
GVX_TRACE(__PRETTY_FUNCTION__);
  ASSERT(M.isSquare()); const int siz = M.getWidth();
  ASSERT(y < siz);

  // find the element with largest absolute value in the column y and
  // below row y:
  int bestj = siz+1; T maxi(0);  // siz+1 serves as a "not found" sentinel
  for (int j = y; j < siz; j ++)
    {
      const T tmp = std::abs(M.getVal(y, j));
      if (tmp > maxi) { maxi = tmp; bestj = j; }
    }

  // if we did not find any non-zero value, let the caller know:
  if (bestj == siz + 1) return -1;

  // if our pivot is at the given y, do nothing and return that y:
  if (bestj == y) return y;

  // raw pointer access avoids the per-element ASSERT in getVal/setVal:
  T* M_arr = M.getArrayPtr();

  // otherwise, exchange rows y and bestj in M, and return bestj:
  for (int i = 0; i < siz; i ++)
    {
      std::swap(M_arr[i + bestj*siz],
                M_arr[i + y*siz]);
    }

  return bestj;
}
00370 
// ######################################################################
//! Matrix inverse by Gauss-Jordan elimination with partial pivoting.
/*! Works on a promoted floating-point copy of the square matrix M and
    applies every row operation simultaneously to an identity matrix,
    which thereby becomes the inverse.
    @throws SingularMatrixException if no non-zero pivot can be found
    in some column. */
template <class T>
Image<typename promote_trait<T, float>::TP> matrixInv(const Image<T>& M)
{
GVX_TRACE(__PRETTY_FUNCTION__);

  // NOTE: In the future if we need a faster matrix inverse, we could
  // specialize for double and float to use dgetri() and sgetri(),
  // respectively, from lapack. This would be analogous to the
  // specializations that we currently have for matrix-matrix and
  // vector-matrix multiplication in this file, as well as the
  // specializations for svd that we have in LinearAlgebra.C.

  ASSERT(M.isSquare());
  typedef typename promote_trait<T, float>::TP TF;
  const int siz = M.getWidth();

  // get an identity matrix:
  Image<TF> res = eye<TF>(siz);

  // make a promoted copy of M that we can modify:
  Image<TF> m(M);

  // for efficiency, we'll now pull pointers to the raw storage of our
  // mutable M copy, and of our result matrix -- this way, we can use
  // array indexing without all of the ASSERT()s in Image::getVal()
  // and Image::setVal() -- this turns out to cut the run time by
  // about a factor of 8x
  TF* m_arr   = m.getArrayPtr();
  TF* res_arr = res.getArrayPtr();

  // ONLY USE THIS MACRO LOCALLY WITHIN THIS FUNCTION! It relies on
  // the local 'siz' variable, and on the fact that any array for
  // which it is called is a square matrix with dimensions siz*siz.
#define ARR_IDX(arrptr, i, j) ((arrptr)[(i) + ((j)*siz)])

  // let's pivote!
  for (int k = 0; k < siz; k ++)
    {
      // pivote at k (may swap rows of m; returns the source row):
      const int piv = matrixPivot(m, k);

      // could we pivote?
      if (piv == -1)
        throw SingularMatrixException(M, SRC_POS);

      // if we did pivote, then let's also exchange rows piv and k in temp:
      if (piv != k)
        for (int i = 0; i < siz; i ++)
          {
            std::swap(ARR_IDX(res_arr, i, piv),
                      ARR_IDX(res_arr, i, k));
          }

      // divide row k by our pivot value in both matrices:
      const TF val = 1.0F / ARR_IDX(m_arr, k, k);
      for (int i = 0; i < siz; i ++)
        {
          ARR_IDX(m_arr,   i, k)  *= val;
          ARR_IDX(res_arr, i, k)  *= val;
        }

      // make sure everybody else in that column becomes zero in the
      // original matrix:
      for (int j = 0; j < siz; j ++)
        if (j != k)
          {
            const TF v = ARR_IDX(m_arr, k, j);
            for (int i = 0; i < siz; i ++)
              {
                ARR_IDX(m_arr,   i, j)  -= ARR_IDX(m_arr,   i, k) * v;
                ARR_IDX(res_arr, i, j)  -= ARR_IDX(res_arr, i, k) * v;
              }
          }
    }
  return res;

  // the #undef is processed by the preprocessor regardless of the
  // return statement above; it just limits the macro's scope:
#undef ARR_IDX
}
00450 
00451 // ######################################################################
00452 template <class T>
00453 typename promote_trait<T,T>::TP dotprod(const Image<T>& A, const Image<T>& B)
00454 {
00455 GVX_TRACE(__PRETTY_FUNCTION__);
00456   ASSERT(A.isSameSize(B));
00457   typename promote_trait<T,T>::TP result = 0;
00458   typename Image<T>::const_iterator aptr = A.begin(),
00459     stop = A.end(), bptr = B.begin();
00460 
00461   while (aptr != stop) result += (*aptr++) * (*bptr++);
00462 
00463   return result;
00464 }
00465 
00466 // ######################################################################
00467 template <class T>
00468 typename promote_trait<T, float>::TP matrixDet(const Image<T>& M)
00469 {
00470 GVX_TRACE(__PRETTY_FUNCTION__);
00471   ASSERT(M.isSquare()); const int siz = M.getWidth();
00472   typedef typename promote_trait<T, float>::TP TF;
00473 
00474   // get a local copy we can modify:
00475   Image<TF> tmp = M;
00476   TF det = 1;
00477 
00478    for (int k = 0; k < siz; k++)
00479      {
00480        int idx = matrixPivot(tmp, k);
00481        if (idx == -1) return 0; // singular matrix
00482        if (idx != 0) det = -det;
00483 
00484        TF pv = tmp.getVal(k, k);
00485        det *= pv;
00486        for (int j = k+1; j < siz; j++)
00487          {
00488            TF piv = TF(tmp.getVal(k, j)) / pv;
00489 
00490            for (int i = k+1; i < siz; i++)
00491              tmp.setVal(i, j, tmp.getVal(i, j) - piv * tmp.getVal(i, k));
00492          }
00493      }
00494    return det;
00495 }
00496 
00497 // Include the explicit instantiations
00498 #include "inst/Image/MatrixOps.I"
00499 
// Extra explicit instantiations needed by clients, beyond those pulled
// in by the generated inst/Image/MatrixOps.I file:
template Image<promote_trait<double, double>::TP> vmMult(const Image<double>&,const Image<double>&);
template Image<promote_trait<double, double>::TP> matrixMult(const Image<double>&,const Image<double>&);
template Image<double> transpose(const Image<double>&);
template Image<short> transpose(const Image<short> &);
template Image<unsigned short> transpose(const Image<unsigned short> &);
template promote_trait<double, double>::TP trace(const Image<double>&);
template Image<promote_trait<double, float>::TP> matrixInv(const Image<double>&);
00507 
00508 // ######################################################################
00509 /* So things look consistent in everyone's emacs... */
00510 /* Local Variables: */
00511 /* indent-tabs-mode: nil */
00512 /* End: */
Generated on Sun May 8 08:40:56 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3