RgbConversion.C

Go to the documentation of this file.
00001 /*!@file Video/RgbConversion.C Raw conversion between video formats and RGB images */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00005 // by the University of Southern California (USC) and the iLab at USC.  //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: Rob Peters <rjpeters at usc dot edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Video/RgbConversion.C $
00035 // $Id: RgbConversion.C 12962 2010-03-06 02:13:53Z irock $
00036 //
00037 
00038 #ifndef VIDEO_RGBCONVERSION_C_DEFINED
00039 #define VIDEO_RGBCONVERSION_C_DEFINED
00040 
#include "Video/RgbConversion.H"

#include "Image/JPEGUtil.H"
#include "Image/Dims.H"
#include "Image/Image.H"
#include "Image/Pixels.H"
#include "Image/color_conversions.H" // for yv12_to_rgb24_c()
#include "Util/log.H"
#include "rutz/error_context.h" // for GVX_ERR_CONTEXT
#include "rutz/trace.h"

#include <cstring> // for memcpy()
#include <vector>
00051 
00052 namespace
00053 {
00054   // ######################################################################
00055   void checkBufferLength(const size_t actual, const size_t expected)
00056   {
00057     if (actual < expected)
00058       LFATAL("input buffer is too short (got %"ZU", expected %"ZU")",
00059              actual, expected);
00060 
00061     if (actual > expected)
00062       LINFO("input buffer is longer than expected (got %"ZU", expected %"ZU")\n"
00063             "(this is not a fatal error, but make sure the width and height are correct)",
00064             actual, expected);
00065   }
00066 }
00067 
00068 // ######################################################################
00069 template <class T>
00070 Image<PixRGB<T> > fromRGB(const T* data, const size_t length,
00071                           const Dims& dims, const bool byteswap)
00072 {
00073 GVX_TRACE(__PRETTY_FUNCTION__);
00074 
00075   GVX_ERR_CONTEXT("converting from RGB to RGB");
00076 
00077   checkBufferLength(length, dims.sz() * 3);
00078 
00079   // dirty hack: assumes that we know the internal representation of PixRGB!
00080   Image< PixRGB<T> > dest(dims, NO_INIT);
00081 
00082   if (byteswap)
00083     {
00084       typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00085       typename Image<PixRGB<T> >::iterator stop = dest.endw();
00086 
00087       checkBufferLength(length, dims.sz() * 3);
00088 
00089       while(aptr != stop)
00090         {
00091           // data stored as: b, g, r
00092           (*aptr++).set(data[2], data[1], data[0]);
00093           data += 3;
00094         }
00095     }
00096   else
00097     {
00098       memcpy(dest.getArrayPtr(), data, dims.sz() * 3);
00099     }
00100 
00101   return dest;
00102 }
00103 
00104 // ######################################################################
00105 template <class T>
00106 Image<PixRGB<T> > fromARGB(const T* data, const size_t length,
00107                            const Dims& dims, const bool byteswap)
00108 {
00109 GVX_TRACE(__PRETTY_FUNCTION__);
00110 
00111   GVX_ERR_CONTEXT("converting from ARGB to RGB");
00112 
00113   checkBufferLength(length, dims.sz() * 4);
00114 
00115   Image< PixRGB<T> > dest(dims, NO_INIT);
00116   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00117   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00118 
00119   if (byteswap)
00120     {
00121       while(aptr != stop)
00122         {
00123           // data stored as: b, g, r, alpha
00124           (*aptr++).set(data[2], data[1], data[0]);
00125           data += 4;
00126         }
00127     }
00128   else
00129     {
00130       while(aptr != stop)
00131         {
00132           // data stored as: alpha, r, g, b
00133           (*aptr++).set(data[1], data[2], data[3]);
00134           data += 4;
00135         }
00136     }
00137 
00138   return dest;
00139 }
00140 
00141 // ######################################################################
00142 Image<PixRGB<byte> > fromRGB555(const byte* data, const size_t length,
00143                                 const Dims& dims, const bool byteswap)
00144 {
00145 GVX_TRACE(__PRETTY_FUNCTION__);
00146 
00147   GVX_ERR_CONTEXT("converting from RGB555 to RGB");
00148 
00149   checkBufferLength(length, dims.sz() * 2);
00150 
00151   Image<PixRGB<byte> > dest(dims, NO_INIT);
00152   Image<PixRGB<byte> >::iterator aptr = dest.beginw();
00153   Image<PixRGB<byte> >::iterator stop = dest.endw();
00154 
00155   if (byteswap)
00156     {
00157       while(aptr != stop)
00158         {
00159           // data stored as: 1 ignored bit, 5-bit R, 5-bit G, 5-bit B
00160           int rgb = (int(data[1]) << 8) | data[0];
00161           byte r = byte((rgb & 0x7C00) >> 7);
00162           byte g = byte((rgb & 0x03E0) >> 2);
00163           byte b = byte((rgb & 0x001F) << 3);
00164           (*aptr++).set(r, g, b);
00165           data += 2;
00166         }
00167     }
00168   else
00169     {
00170       while(aptr != stop)
00171         {
00172           // data stored as: 1 ignored bit, 5-bit R, 5-bit G, 5-bit B
00173           int rgb = (int(data[0]) << 8) | data[1];
00174           byte r = byte((rgb & 0x7C00) >> 7);
00175           byte g = byte((rgb & 0x03E0) >> 2);
00176           byte b = byte((rgb & 0x001F) << 3);
00177           (*aptr++).set(r, g, b);
00178           data += 2;
00179         }
00180     }
00181 
00182   return dest;
00183 }
00184 
00185 // ######################################################################
00186 Image<PixRGB<byte> > fromRGB565(const byte* data, const size_t length,
00187                                 const Dims& dims, const bool byteswap)
00188 {
00189 GVX_TRACE(__PRETTY_FUNCTION__);
00190 
00191   GVX_ERR_CONTEXT("converting from RGB565 to RGB");
00192 
00193   checkBufferLength(length, dims.sz() * 2);
00194 
00195   Image< PixRGB<byte> > dest(dims, NO_INIT);
00196   Image<PixRGB<byte> >::iterator aptr = dest.beginw();
00197   Image<PixRGB<byte> >::iterator stop = dest.endw();
00198 
00199   if (byteswap)
00200     {
00201       while(aptr != stop)
00202         {
00203           // data stored as: 5-bit R, 6-bit G, 5-bit B
00204           int rgb = (int(data[1]) << 8) | data[0];
00205           byte r = byte((rgb & 0xF800) >> 8);
00206           byte g = byte((rgb & 0x07E0) >> 3);
00207           byte b = byte((rgb & 0x001F) << 3);
00208           (*aptr++).set(r, g, b);
00209           data += 2;
00210         }
00211     }
00212   else
00213     {
00214       while(aptr != stop)
00215         {
00216           // data stored as: 5-bit R, 6-bit G, 5-bit B
00217           int rgb = (int(data[0]) << 8) | data[1];
00218           byte r = byte((rgb & 0xF800) >> 8);
00219           byte g = byte((rgb & 0x07E0) >> 3);
00220           byte b = byte((rgb & 0x001F) << 3);
00221           (*aptr++).set(r, g, b);
00222           data += 2;
00223         }
00224     }
00225 
00226   return dest;
00227 }
00228 
00229 // ######################################################################
00230 template <class T>
00231 Image<PixRGB<T> > fromVideoYUV24(const T* data, const size_t length,
00232                                  const Dims& dims, const bool byteswap)
00233 {
00234 GVX_TRACE(__PRETTY_FUNCTION__);
00235 
00236   GVX_ERR_CONTEXT("converting from YUV24 to RGB");
00237 
00238   checkBufferLength(length, dims.sz() * 3);
00239 
00240   Image< PixRGB<T> > dest(dims, NO_INIT);
00241   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00242   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00243 
00244   if (byteswap)
00245     {
00246       while (aptr != stop)
00247         {
00248           // data stored as: y0, v0, u0, v1, v1, u1
00249           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[0], data[2], data[1]));
00250           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[3], data[5], data[4]));
00251           data += 6;
00252         }
00253     }
00254   else
00255     {
00256       while (aptr != stop)
00257         {
00258           // data stored as: y0, u0, v0, v1, u1, v1
00259           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[0], data[1], data[2]));
00260           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[3], data[4], data[5]));
00261           data += 6;
00262         }
00263     }
00264 
00265   return dest;
00266 }
00267 
00268 // ######################################################################
00269 template <class T>
00270 Image<PixRGB<T> > fromVideoYUV444(const T* data, const size_t length,
00271                                   const Dims& dims, const bool byteswap)
00272 {
00273 GVX_TRACE(__PRETTY_FUNCTION__);
00274 
00275   GVX_ERR_CONTEXT("converting from YUV444 to RGB");
00276 
00277   checkBufferLength(length, dims.sz() * 3);
00278 
00279   Image< PixRGB<T> > dest(dims, NO_INIT);
00280   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00281   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00282 
00283   if (byteswap)
00284     {
00285       while(aptr != stop)
00286         {
00287           // data stored as: y0, u0, u1, v0, v1, y1
00288           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[0], data[1], data[3]));
00289           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[5], data[2], data[4]));
00290           data += 6;
00291         }
00292     }
00293   else
00294     {
00295       while(aptr != stop)
00296         {
00297           // data stored as: u0, y0, v0, u1, y1, v1
00298           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[1], data[0], data[2]));
00299           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[4], data[3], data[5]));
00300           data += 6;
00301         }
00302     }
00303 
00304   return dest;
00305 }
00306 
00307 // ######################################################################
00308 template <>
00309 Image<PixRGB<byte> > fromVideoYUV422(const byte* data, const size_t length,
00310                                      const Dims& dims, const bool byteswap)
00311 {
00312 GVX_TRACE(__PRETTY_FUNCTION__);
00313 
00314   GVX_ERR_CONTEXT("converting from YUV422(byte) to RGB");
00315 
00316   checkBufferLength(length, dims.sz() * 2);
00317 
00318   Image< PixRGB<byte> > dest(dims, NO_INIT);
00319 
00320   yuv422_to_rgb24_c(reinterpret_cast<byte*>(dest.getArrayPtr()),
00321                     dims.w(), dims.h(), data, byteswap);
00322 
00323   return dest;
00324 }
00325 
00326 // ######################################################################
00327 template <class T>
00328 Image<PixRGB<T> > fromVideoYUV422(const T* data, const size_t length,
00329                                   const Dims& dims, const bool byteswap)
00330 {
00331 GVX_TRACE(__PRETTY_FUNCTION__);
00332 
00333   GVX_ERR_CONTEXT("converting from YUV422 to RGB");
00334 
00335   checkBufferLength(length, dims.sz() * 2);
00336 
00337   Image< PixRGB<T> > dest(dims, NO_INIT);
00338   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00339   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00340 
00341   if (byteswap)
00342     {
00343       while(aptr != stop)
00344         {
00345           // data stored as:  y0, u, y1, v
00346           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[0], data[1], data[3]));
00347           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[2], data[1], data[3]));
00348           data += 4;
00349         }
00350     }
00351   else
00352     {
00353       while(aptr != stop)
00354         {
00355           // data stored as: u, y0, v, y1
00356           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[1], data[0], data[2]));
00357           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[3], data[0], data[2]));
00358           data += 4;
00359         }
00360     }
00361 
00362   return dest;
00363 }
00364 
00365 // ######################################################################
00366 template <class T>
00367 Image<PixRGB<T> > fromVideoYUV411(const T* data, const size_t length,
00368                                   const Dims& dims, const bool byteswap)
00369 {
00370 GVX_TRACE(__PRETTY_FUNCTION__);
00371 
00372   GVX_ERR_CONTEXT("converting from YUV411 to RGB");
00373 
00374   checkBufferLength(length, dims.sz() * 3 / 2);
00375 
00376   Image< PixRGB<T> > dest(dims, NO_INIT);
00377   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00378   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00379 
00380   if (byteswap)
00381     {
00382       while(aptr != stop)
00383         {
00384           // data stored as: y0, u, v, y1, y3, y2
00385           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[0], data[1], data[2]));
00386           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[3], data[1], data[2]));
00387           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[5], data[1], data[2]));
00388           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[4], data[1], data[2]));
00389           data += 6;
00390         }
00391     }
00392   else
00393     {
00394       while(aptr != stop)
00395         {
00396           // data stored as: u, y0, y1, v, y2, y3
00397           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[1], data[0], data[3]));
00398           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[2], data[0], data[3]));
00399           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[4], data[0], data[3]));
00400           (*aptr++) = PixRGB<T>(PixVideoYUV<double>(data[5], data[0], data[3]));
00401           data += 6;
00402         }
00403     }
00404 
00405   return dest;
00406 }
00407 
00408 // ######################################################################
00409 template <class T>
00410 Image<PixRGB<T> > fromVideoYUV444P(const T* data, const size_t length,
00411                                    const Dims& dims)
00412 {
00413 GVX_TRACE(__PRETTY_FUNCTION__);
00414 
00415   GVX_ERR_CONTEXT("converting from YUV444P to RGB");
00416 
00417   checkBufferLength(length, dims.sz() * 3);
00418 
00419   Image< PixRGB<T> > dest(dims, NO_INIT);
00420   int w = dims.w(), h = dims.h();
00421   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00422   const T *yptr = data;
00423   const T *uptr = yptr + w * h;
00424   const T *vptr = uptr + w * h;
00425 
00426   for (int j = 0; j < h; j ++)
00427     for (int i = 0; i < w; i ++)
00428       {
00429         // we have a 1 luminance pixels per chroma pair
00430         (*aptr++) = PixRGB<T>(PixVideoYUV<double>(*yptr++, *uptr++, *vptr++));
00431       }
00432 
00433   return dest;
00434 }
00435 
00436 // ######################################################################
00437 template <>
00438 Image<PixRGB<byte> > fromVideoYUV422P(const byte* data, const size_t length,
00439                                       const Dims& dims)
00440 {
00441 GVX_TRACE(__PRETTY_FUNCTION__);
00442 
00443   GVX_ERR_CONTEXT("converting from YUV422P(byte) to RGB");
00444 
00445   const int w = dims.w();
00446   const int h = dims.h();
00447 
00448   checkBufferLength(length, dims.sz() + 2*(w/2)*h);
00449 
00450   Image<PixRGB<byte> > dest(dims, NO_INIT);
00451   const byte* yptr = data;
00452   const byte* uptr = yptr + w * h;
00453   const byte* vptr = uptr + (w/2) * h;
00454 
00455   yuv422p_to_rgb24_c(reinterpret_cast<byte*>(dest.getArrayPtr()),
00456                      w, h,
00457                      yptr, uptr, vptr);
00458 
00459   return dest;
00460 }
00461 
00462 // ######################################################################
//! Convert planar YUV422P to RGB (generic pixel type, explicit matrix math).
template <class T>
Image<PixRGB<T> > fromVideoYUV422P(const T* data, const size_t length,
                                   const Dims& dims)
{
GVX_TRACE(__PRETTY_FUNCTION__);

  GVX_ERR_CONTEXT("converting from YUV422P to RGB");

  const int w = dims.w();
  const int h = dims.h();

  // planar layout: full-resolution y plane followed by u and v planes at
  // half horizontal resolution
  checkBufferLength(length, dims.sz() + 2*(w/2)*h);

  Image< PixRGB<T> > dest(dims, NO_INIT);
  typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
  const T *yptr = data;
  const T *uptr = yptr + w * h;
  const T *vptr = uptr + (w/2) * h;

  // NOTE: the i loop steps by 2, so this assumes an even width
  for (int j = 0; j < h; j ++)
    for (int i = 0; i < w; i += 2)
      {
        // we have 2 luminance pixels per chroma pair

        // remove the luma/chroma offsets before applying the linear
        // YUV->RGB matrix (coefficients come from Image/colorDefs.H)
        const double yf1 = double(*yptr++) - VIDEOYUV_Y_OFFSET;
        const double uf = double(*uptr++) - VIDEOYUV_UV_OFFSET;
        const double vf = double(*vptr++) - VIDEOYUV_UV_OFFSET;

        const double rf1 = VIDEOYUV_RGB_Y*yf1                   + VIDEOYUV_R_V*vf;
        const double gf1 = VIDEOYUV_RGB_Y*yf1 + VIDEOYUV_G_U*uf + VIDEOYUV_G_V*vf;
        const double bf1 = VIDEOYUV_RGB_Y*yf1 + VIDEOYUV_B_U*uf;

        // clamp to the displayable range, then convert to the pixel type
        aptr->p[0] = clamped_rounded_convert<T>(clampValue(rf1, 0.0, 255.0));
        aptr->p[1] = clamped_rounded_convert<T>(clampValue(gf1, 0.0, 255.0));
        aptr->p[2] = clamped_rounded_convert<T>(clampValue(bf1, 0.0, 255.0));
        ++aptr;

        // the second luminance sample reuses the same chroma pair (uf, vf)
        const double yf2 = double(*yptr++) - VIDEOYUV_Y_OFFSET;

        const double rf2 = VIDEOYUV_RGB_Y*yf2                   + VIDEOYUV_R_V*vf;
        const double gf2 = VIDEOYUV_RGB_Y*yf2 + VIDEOYUV_G_U*uf + VIDEOYUV_G_V*vf;
        const double bf2 = VIDEOYUV_RGB_Y*yf2 + VIDEOYUV_B_U*uf;

        aptr->p[0] = clamped_rounded_convert<T>(clampValue(rf2, 0.0, 255.0));
        aptr->p[1] = clamped_rounded_convert<T>(clampValue(gf2, 0.0, 255.0));
        aptr->p[2] = clamped_rounded_convert<T>(clampValue(bf2, 0.0, 255.0));
        ++aptr;
      }

  return dest;
}
00514 
00515 // ######################################################################
00516 template <class T>
00517 Image<PixRGB<T> > fromVideoYUV411P(const T* data, const size_t length,
00518                                    const Dims& dims)
00519 {
00520 GVX_TRACE(__PRETTY_FUNCTION__);
00521 
00522   GVX_ERR_CONTEXT("converting from YUV411P to RGB");
00523 
00524   const int w = dims.w();
00525   const int h = dims.h();
00526 
00527   checkBufferLength(length, dims.sz() + 2*(w/4)*h);
00528 
00529   Image< PixRGB<T> > dest(dims, NO_INIT);
00530   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00531   const T *yptr = data;
00532   const T *uptr = yptr + w * h;
00533   const T *vptr = uptr + (w/4) * h;
00534 
00535   for (int j = 0; j < h; j ++)
00536     for (int i = 0; i < w; i += 4)
00537       {
00538         // we have a 4 luminance pixels per chroma pair
00539         (*aptr++) = PixRGB<T>(PixVideoYUV<double>(*yptr++, *uptr, *vptr));
00540         (*aptr++) = PixRGB<T>(PixVideoYUV<double>(*yptr++, *uptr, *vptr));
00541         (*aptr++) = PixRGB<T>(PixVideoYUV<double>(*yptr++, *uptr, *vptr));
00542         (*aptr++) = PixRGB<T>(PixVideoYUV<double>(*yptr++, *uptr++, *vptr++));
00543       }
00544 
00545   return dest;
00546 }
00547 
00548 // ######################################################################
00549 template <>
00550 Image<PixRGB<byte> > fromVideoYUV420P(const byte* data, const size_t length,
00551                                       const Dims& dims)
00552 {
00553 GVX_TRACE(__PRETTY_FUNCTION__);
00554 
00555   // Here we have a specialization of fromVideoYUV420P that uses the
00556   // more optimized implementation from yv12_to_rgb24_c(). However,
00557   // both the straightforward implementation of fromVideoYUV422P and
00558   // the optimized implementation use the same conversion factors
00559   // (VIDEOYUV_RGB_Y, VIDEOYUV_R_V, etc.)  given in Image/colorDefs.H,
00560   // so either implementation should give the same result.
00561 
00562   GVX_ERR_CONTEXT("converting from YUV420P(byte) to RGB");
00563 
00564   const int w = dims.w();
00565   const int h = dims.h();
00566   // we have to do (w+1)/2 instead of just w/2, because if e.g. the y
00567   // array has 5 pixels, then we want the u and v arrays to have 3
00568   // pixels, not 2:
00569   const int w2 = (w+1)/2;
00570   const int h2 = (h+1)/2;
00571 
00572   checkBufferLength(length, dims.sz() + 2*w2*h2);
00573 
00574   Image< PixRGB<byte> > dest(dims, NO_INIT);
00575 
00576   yv12_to_rgb24_c(reinterpret_cast<byte*>(dest.getArrayPtr()),
00577                   w /* dst_stride */,
00578                   data,
00579                   data + w*h,
00580                   data + w*h + w2*h2,
00581                   w /* y_stride */,
00582                   w2 /* uv_stride */,
00583                   w /* image width */,
00584                   h /* image height */);
00585 
00586   return dest;
00587 }
00588 
00589 
00590 // ######################################################################
00591 template <class T>
00592 Image<PixRGB<T> > fromVideoYUV420P(const T* data, const size_t length,
00593                                    const Dims& dims)
00594 {
00595 GVX_TRACE(__PRETTY_FUNCTION__);
00596 
00597   GVX_ERR_CONTEXT("converting from YUV420P to RGB");
00598 
00599   const int w = dims.w();
00600   const int h = dims.h();
00601   // we have to do (w+1)/2 instead of just w/2, because if e.g. the y
00602   // array has 5 pixels, then we want the u and v arrays to have 3
00603   // pixels, not 2:
00604   const int w2 = (w+1)/2;
00605   const int h2 = (h+1)/2;
00606 
00607   checkBufferLength(length, dims.sz() + 2*w2*h2);
00608 
00609   Image< PixRGB<T> > dest(dims, NO_INIT);
00610   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00611   const T *yptr = data;
00612   const T *uptr = yptr + w * h;
00613   const T *vptr = uptr + (w/2) * (h/2);
00614 
00615   for (int j = 0; j < h; j += 2)
00616     {
00617       for (int i = 0; i < w; i += 2)
00618         {
00619           T u = *uptr++, v = *vptr++;
00620           // we have a 2x2 luminance block per chroma pair
00621           (*aptr) = PixRGB<T>(PixVideoYUV<double>(*yptr, u, v));
00622           aptr[1] = PixRGB<T>(PixVideoYUV<double>(yptr[1], u, v));
00623           aptr[w] = PixRGB<T>(PixVideoYUV<double>(yptr[w], u, v));
00624           aptr[w+1] = PixRGB<T>(PixVideoYUV<double>(yptr[w + 1], u, v));
00625           aptr += 2; yptr += 2;
00626         }
00627       aptr += w; yptr += w;
00628     }
00629 
00630   return dest;
00631 }
00632 
00633 // ######################################################################
00634         template <>
00635 Image<PixRGB<byte> > fromVideoHM12(const byte* data, const size_t length,
00636                 const Dims& dims)
00637 {
00638         GVX_TRACE(__PRETTY_FUNCTION__);
00639 
00640         // Here we have a specialization of fromVideoHM12 that uses the
00641         // more optimized implementation from yv12_to_rgb24_c(). However,
00642         // both the straightforward implementation of fromVideoYUV422P and
00643         // the optimized implementation use the same conversion factors
00644         // (VIDEOYUV_RGB_Y, VIDEOYUV_R_V, etc.)  given in Image/colorDefs.H,
00645         // so either implementation should give the same result.
00646 
00647         GVX_ERR_CONTEXT("converting from HM12(byte) to RGB");
00648 
00649         const int w = dims.w();
00650         const int h = dims.h();
00651         checkBufferLength(length, w*h*3/2);
00652 
00653         Image< PixRGB<byte> > dest(dims, NO_INIT);
00654         Image<PixRGB<byte> >::iterator aptr = dest.beginw();
00655         const byte *yptr = data;
00656         const byte *uvptr = yptr + w * h;
00657 
00658         unsigned char frameu[w*h / 4];
00659         unsigned char framev[w*h / 4];
00660 
00661         // descramble U/V plane
00662         // dstride = 720 / 2 = w
00663         // The U/V values are interlaced (UVUV...).
00664         // Again, the UV plane is divided into blocks of 16x16 UV values.
00665         // Each block in transmitted in turn, line-by-line.
00666         for (int y = 0; y < h/2; y += 16) {
00667                 for (int x = 0; x < w/2; x += 8) {
00668                         for (int i = 0; i < 16; i++) {
00669                                 int idx = x + (y + i) * (w/2);
00670 
00671                                 frameu[idx+0] = uvptr[0];  framev[idx+0] = uvptr[1];
00672                                 frameu[idx+1] = uvptr[2];  framev[idx+1] = uvptr[3];
00673                                 frameu[idx+2] = uvptr[4];  framev[idx+2] = uvptr[5];
00674                                 frameu[idx+3] = uvptr[6];  framev[idx+3] = uvptr[7];
00675                                 frameu[idx+4] = uvptr[8];  framev[idx+4] = uvptr[9];
00676                                 frameu[idx+5] = uvptr[10]; framev[idx+5] = uvptr[11];
00677                                 frameu[idx+6] = uvptr[12]; framev[idx+6] = uvptr[13];
00678                                 frameu[idx+7] = uvptr[14]; framev[idx+7] = uvptr[15];
00679                                 uvptr += 16;
00680                         }
00681                 }
00682         }
00683 
00684         for (int y = 0; y < h; y += 16)
00685         {
00686                 for (int x = 0; x < w; x += 16)
00687                 {
00688                         ////the Y plane is divided into blocks of 16x16
00689                         for(int i=0; i<16; i++)
00690                         {
00691                                 for(int j=0; j<16; j++)
00692                                 {
00693                                         int idx = (x/2)+ (((y/2)+i/2)*w/2) + j/2;
00694                                         *(aptr+x+(y+i)*w+j) = PixRGB<byte>(PixVideoYUV<double>(*(yptr+j), frameu[idx], framev[idx]));
00695                                 }
00696                                 yptr+=16;
00697                         }
00698                 }
00699         }
00700 
00701   return dest;
00702 
00703 }
00704 
00705 // ######################################################################
00706 template <class T>
00707 Image<PixRGB<T> > fromVideoHM12(const T* data, const size_t length,
00708                                    const Dims& dims)
00709 {
00710 GVX_TRACE(__PRETTY_FUNCTION__);
00711 
00712   GVX_ERR_CONTEXT("converting from YUV420P to RGB");
00713 
00714   const int w = dims.w();
00715   const int h = dims.h();
00716   // we have to do (w+1)/2 instead of just w/2, because if e.g. the y
00717   // array has 5 pixels, then we want the u and v arrays to have 3
00718   // pixels, not 2:
00719   const int w2 = (w+1)/2;
00720   const int h2 = (h+1)/2;
00721 
00722   checkBufferLength(length, dims.sz() + 2*w2*h2);
00723 
00724   Image< PixRGB<T> > dest(dims, NO_INIT);
00725   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00726   const T *yptr = data;
00727   const T *uptr = yptr + w * h;
00728   const T *vptr = uptr + (w/2) * (h/2);
00729 
00730   for (int j = 0; j < h; j += 2)
00731     {
00732       for (int i = 0; i < w; i += 2)
00733         {
00734           T u = *uptr++, v = *vptr++;
00735           // we have a 2x2 luminance block per chroma pair
00736           (*aptr) = PixRGB<T>(PixVideoYUV<double>(*yptr, u, v));
00737           aptr[1] = PixRGB<T>(PixVideoYUV<double>(yptr[1], u, v));
00738           aptr[w] = PixRGB<T>(PixVideoYUV<double>(yptr[w], u, v));
00739           aptr[w+1] = PixRGB<T>(PixVideoYUV<double>(yptr[w + 1], u, v));
00740           aptr += 2; yptr += 2;
00741         }
00742       aptr += w; yptr += w;
00743     }
00744 
00745   return dest;
00746 }
00747 
00748 template <class T>
00749 Image<PixRGB<T> > fromVideoMJPEG(const T* data, const size_t length, const Dims& dims)
00750 {
00751   Image< PixRGB<T> > dest(dims, NO_INIT);
00752   LINFO("LENGTH:::::  %"ZU, length);
00753 
00754   JPEGDecompressor decomp;
00755 //  std::vector<unsigned char> vectorData(data, length);
00756 std::vector<unsigned char> vectorData(length);
00757 
00758 for(unsigned int i=0; i<length; i++)
00759 { vectorData[i] = (unsigned char)data[i]; }
00760 
00761 
00762   LINFO("Made A Vector of Size: %"ZU, vectorData.size());
00763   return decomp.DecompressImage(vectorData);
00764 
00765 
00766   return dest;
00767 }
00768 
00769 
00770 // ######################################################################
00771 template <class T>
00772 Image<PixRGB<T> > fromVideoYUV410P(const T* data, const size_t length,
00773                                    const Dims& dims)
00774 {
00775 GVX_TRACE(__PRETTY_FUNCTION__);
00776 
00777   GVX_ERR_CONTEXT("converting from YUV410P to RGB");
00778 
00779   checkBufferLength(length, dims.sz() * 9 / 8);
00780 
00781   Image< PixRGB<T> > dest(dims, NO_INIT);
00782   int w = dims.w(), h = dims.h(); int w2 = w * 2, w3 = w * 3;
00783   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00784   const T *yptr = data;
00785   const T *uptr = yptr + w * h;
00786   const T *vptr = uptr + (w/4) * (h/4);
00787 
00788   for (int j = 0; j < h; j += 4)
00789     {
00790       for (int i = 0; i < w; i += 4)
00791         {
00792           T u = *uptr++, v = *vptr++;
00793 
00794           // we have a 4x4 luminance block per chroma pair
00795           (*aptr) = PixRGB<T>(PixVideoYUV<double>(*yptr, u, v));
00796           aptr[1] = PixRGB<T>(PixVideoYUV<double>(yptr[1], u, v));
00797           aptr[2] = PixRGB<T>(PixVideoYUV<double>(yptr[2], u, v));
00798           aptr[3] = PixRGB<T>(PixVideoYUV<double>(yptr[3], u, v));
00799 
00800           aptr[w] = PixRGB<T>(PixVideoYUV<double>(yptr[w], u, v));
00801           aptr[w+1] = PixRGB<T>(PixVideoYUV<double>(yptr[w+1], u, v));
00802           aptr[w+2] = PixRGB<T>(PixVideoYUV<double>(yptr[w+2], u, v));
00803           aptr[w+3] = PixRGB<T>(PixVideoYUV<double>(yptr[w+3], u, v));
00804 
00805           aptr[w2] = PixRGB<T>(PixVideoYUV<double>(yptr[w2], u, v));
00806           aptr[w2+1] = PixRGB<T>(PixVideoYUV<double>(yptr[w2+1], u, v));
00807           aptr[w2+2] = PixRGB<T>(PixVideoYUV<double>(yptr[w2+2], u, v));
00808           aptr[w2+3] = PixRGB<T>(PixVideoYUV<double>(yptr[w2+3], u, v));
00809 
00810           aptr[w3] = PixRGB<T>(PixVideoYUV<double>(yptr[w3], u, v));
00811           aptr[w3+1] = PixRGB<T>(PixVideoYUV<double>(yptr[w3+1], u, v));
00812           aptr[w3+2] = PixRGB<T>(PixVideoYUV<double>(yptr[w3+2], u, v));
00813           aptr[w3+3] = PixRGB<T>(PixVideoYUV<double>(yptr[w3+3], u, v));
00814 
00815           aptr += 4; yptr += 4;
00816         }
00817       aptr += w3; yptr += w3;
00818     }
00819 
00820   return dest;
00821 }
00822 
00823 // ######################################################################
00824 template <class T>
00825 Image<PixRGB<T> > fromMono(const T* data, const size_t length,
00826                            const Dims& dims)
00827 {
00828 GVX_TRACE(__PRETTY_FUNCTION__);
00829 
00830   GVX_ERR_CONTEXT("converting from mono to RGB");
00831 
00832   checkBufferLength(length, dims.sz());
00833 
00834   Image< PixRGB<T> > dest(dims, NO_INIT);
00835   typename Image<PixRGB<T> >::iterator aptr = dest.beginw();
00836   typename Image<PixRGB<T> >::iterator stop = dest.endw();
00837 
00838   T m;
00839   while(aptr != stop)
00840     {
00841       m = (*data++); (*aptr++).set(m, m, m);
00842     }
00843 
00844   return dest;
00845 }
00846 
00847 // ######################################################################
00848 Image<PixRGB<byte> > fromBayer(const byte* data, const size_t length,
00849                             const Dims& dims, BayerFormat ft)
00850 {
00851 GVX_TRACE(__PRETTY_FUNCTION__);
00852 
00853   GVX_ERR_CONTEXT("converting from bayer_GB to RGB");
00854 
00855   checkBufferLength(length, dims.sz());
00856 
00857   Image<byte> src(data,dims);
00858   Image<PixRGB<byte> > dest(dims, NO_INIT);
00859   dest = deBayer(src, ft);
00860 
00861   return dest;
00862 }
00863 
00864 // ######################################################################
00865 Image<PixRGB<uint16> > fromBayerU16(const uint16* data, const size_t length,
00866                             const Dims& dims, BayerFormat ft)
00867 {
00868 GVX_TRACE(__PRETTY_FUNCTION__);
00869 
00870   GVX_ERR_CONTEXT("converting from bayer_GB12 to RGB");
00871 
00872   checkBufferLength(length, dims.sz()*sizeof(uint16));
00873 
00874   Image<uint16> src(data,dims);
00875   Image<PixRGB<uint16> > dest(dims, NO_INIT);
00876   dest = deBayer(src, ft);
00877 
00878   return dest;
00879 }
00880 
00881 // ######################################################################
00882 void toVideoYUV422(const Image<PixRGB<byte> > img,
00883                    byte* y, byte *u, byte *v,
00884                    const int ystride,
00885                    const int ustride,
00886                    const int vstride)
00887 {
00888 GVX_TRACE(__PRETTY_FUNCTION__);
00889   const byte* src = reinterpret_cast<const byte*>(img.getArrayPtr());
00890   int w = img.getWidth(), h = img.getHeight();
00891 
00892   for (int j = 0; j < h; j += 2)
00893     {
00894       // NOTE: the formulas here is in Pixels.H, but we don't use the
00895       // functions from there just for higher speed here.
00896 
00897       // NOTE: this code will probably not work with odd image or sizes
00898 
00899       for (int i = 0; i < w; i += 2)
00900         {
00901           // fully convert first RGB pixel:
00902           double r = double(*src++);
00903           double g = double(*src++);
00904           double b = double(*src++);
00905 
00906           double yf = VIDEOYUV_Y_R*r + VIDEOYUV_Y_G*g + VIDEOYUV_Y_B*b;
00907           double uf = VIDEOYUV_U_R*r + VIDEOYUV_U_G*g + VIDEOYUV_U_B*b + VIDEOYUV_UV_OFFSET;
00908           double vf = VIDEOYUV_V_R*r + VIDEOYUV_V_G*g + VIDEOYUV_V_B*b + VIDEOYUV_UV_OFFSET;
00909 
00910           *y++ = clamped_rounded_convert<byte>(yf);
00911           *u++ = clamped_rounded_convert<byte>(uf);
00912           *v++ = clamped_rounded_convert<byte>(vf);
00913 
00914           // only get luminance for second RGB pixel:
00915           r = double(*src++);
00916           g = double(*src++);
00917           b = double(*src++);
00918 
00919           yf =  VIDEOYUV_Y_R*r + VIDEOYUV_Y_G*g + VIDEOYUV_Y_B*b;
00920           *y++ = clamped_rounded_convert<byte>(yf);
00921         }
00922       y += ystride; u += ustride; v += vstride;
00923       for (int i = 0; i < w; i += 2)
00924         {
00925           // only get luminance for third RGB pixel:
00926           double r = double(*src++);
00927           double g = double(*src++);
00928           double b = double(*src++);
00929 
00930           double yf =  VIDEOYUV_Y_R*r + VIDEOYUV_Y_G*g + VIDEOYUV_Y_B*b;
00931           *y++ = clamped_rounded_convert<byte>(yf);
00932 
00933           // only get luminance for fourth RGB pixel:
00934           r = double(*src++);
00935           g = double(*src++);
00936           b = double(*src++);
00937 
00938           yf =  VIDEOYUV_Y_R*r + VIDEOYUV_Y_G*g + VIDEOYUV_Y_B*b;
00939           *y++ = clamped_rounded_convert<byte>(yf);
00940         }
00941       y += ystride;
00942     }
00943 }
00944 
00945 // Include the explicit instantiations
00946 #include "inst/Video/RgbConversion.I"
00947 
00948 // ######################################################################
00949 /* So things look consistent in everyone's emacs... */
00950 /* Local Variables: */
00951 /* mode: c++ */
00952 /* indent-tabs-mode: nil */
00953 /* End: */
00954 
00955 #endif // VIDEO_RGBCONVERSION_C_DEFINED
Generated on Sun May 8 08:42:36 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3