VideoFormatCoercion.C

Go to the documentation of this file.
00001 /*!@file Video/VideoFormatCoercion.C */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00005 // by the University of Southern California (USC) and the iLab at USC.  //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: Rob Peters <rjpeters at usc dot edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Video/VideoFormatCoercion.C $
00035 // $Id: VideoFormatCoercion.C 9271 2008-02-15 17:59:27Z rjpeters $
00036 //
00037 
00038 #ifndef VIDEO_VIDEOFORMATCOERCION_C_DEFINED
00039 #define VIDEO_VIDEOFORMATCOERCION_C_DEFINED
00040 
00041 #include "Video/VideoFormatCoercion.H"
00042 
00043 #include "Image/Image.H"
00044 #include "Image/Pixels.H"
00045 #include "Util/log.H"
00046 #include "Util/sformat.H"
00047 #include "Video/VideoFrame.H"
00048 #include "Video/RgbConversion.H"
00049 
00050 // ######################################################################
//! Sanity-check the size of an input buffer against what a coercion expects.
/*! @param actual   number of bytes actually supplied by the caller
    @param expected number of bytes implied by the frame's Dims and pixel format

    A short buffer is reported through LFATAL (reading past it would be
    out of bounds); an over-long buffer only triggers an informational
    LINFO message, since the trailing bytes are simply never read. */
static void checkBufferLength(const size_t actual, const size_t expected)
{
  if (actual < expected)
    LFATAL("input buffer is too short (got %"ZU", expected %"ZU")",
           actual, expected);

  if (actual > expected)
    LINFO("input buffer is longer than expected (got %"ZU", expected %"ZU")\n"
          "(this is not a fatal error, but make sure the width and height are correct)",
          actual, expected);
}
00062 
00063 // ######################################################################
00064 // ######################################################################
00065 // conversions to RGB24
00066 // ######################################################################
00067 // ######################################################################
00068 
00069 // ######################################################################
//! Coerce packed RGB data into an RGB24 VideoFrame (delegates to fromRGB()).
/*! byteswap is forwarded unchanged; presumably it selects B,G,R input byte
    order, as in the RGB24_to_* converters below — verify in RgbConversion.H. */
VideoFrame RGB_to_RGB24(const byte* sptr, const size_t length,
                        const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromRGB(sptr, length, dims, byteswap));
}
00075 
00076 // ######################################################################
//! Coerce packed ARGB data into an RGB24 VideoFrame (delegates to fromARGB()).
VideoFrame ARGB_to_RGB24(const byte* sptr, const size_t length,
                         const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromARGB(sptr, length, dims, byteswap));
}
00082 
00083 // ######################################################################
//! Coerce 16-bit RGB555 data into an RGB24 VideoFrame (delegates to fromRGB555()).
VideoFrame RGB555_to_RGB24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromRGB555(sptr, length, dims, byteswap));
}
00089 
00090 // ######################################################################
//! Coerce 16-bit RGB565 data into an RGB24 VideoFrame (delegates to fromRGB565()).
VideoFrame RGB565_to_RGB24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromRGB565(sptr, length, dims, byteswap));
}
00096 
00097 // ######################################################################
//! Coerce packed YUV24 data into an RGB24 VideoFrame (delegates to fromVideoYUV24()).
VideoFrame YUV24_to_RGB24(const byte* sptr, const size_t length,
                          const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV24(sptr, length, dims, byteswap));
}
00103 
00104 // ######################################################################
//! Coerce packed YUV444 data into an RGB24 VideoFrame (delegates to fromVideoYUV444()).
VideoFrame YUV444_to_RGB24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV444(sptr, length, dims, byteswap));
}
00110 
00111 // ######################################################################
//! Coerce packed YUYV data into an RGB24 VideoFrame.
/*! YUYV is the 16-bit-byteswapped sibling of YUV422 (UYVY), so we reuse
    fromVideoYUV422() with the byteswap flag inverted (compare
    YUV422_to_RGB24() below, which passes byteswap through unchanged). */
VideoFrame YUYV_to_RGB24(const byte* sptr, const size_t length,
                         const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV422(sptr, length, dims, !byteswap));
}
00117 
00118 // ######################################################################
//! Coerce packed YUV422 (UYVY) data into an RGB24 VideoFrame (delegates to fromVideoYUV422()).
VideoFrame YUV422_to_RGB24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV422(sptr, length, dims, byteswap));
}
00124 
00125 // ######################################################################
//! Coerce packed YUV411 data into an RGB24 VideoFrame (delegates to fromVideoYUV411()).
VideoFrame YUV411_to_RGB24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV411(sptr, length, dims, byteswap));
}
00131 
00132 // ######################################################################
//! Coerce planar YUV444P data into an RGB24 VideoFrame (delegates to fromVideoYUV444P()).
/*! NOTE(review): byteswap is ignored — fromVideoYUV444P() takes no
    byte-order flag, presumably because plane order is fixed; verify. */
VideoFrame YUV444P_to_RGB24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV444P(sptr, length, dims));
}
00138 
00139 // ######################################################################
//! Coerce planar YUV422P data into an RGB24 VideoFrame (delegates to fromVideoYUV422P()).
/*! NOTE(review): byteswap is ignored, as for the other planar formats. */
VideoFrame YUV422P_to_RGB24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV422P(sptr, length, dims));
}
00145 
00146 // ######################################################################
//! Coerce planar YUV411P data into an RGB24 VideoFrame (delegates to fromVideoYUV411P()).
/*! NOTE(review): byteswap is ignored, as for the other planar formats. */
VideoFrame YUV411P_to_RGB24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV411P(sptr, length, dims));
}
00152 
00153 // ######################################################################
//! Coerce planar YUV420P data into an RGB24 VideoFrame (delegates to fromVideoYUV420P()).
/*! NOTE(review): byteswap is ignored, as for the other planar formats. */
VideoFrame YUV420P_to_RGB24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV420P(sptr, length, dims));
}
00159 
00160 // ######################################################################
//! Coerce planar YUV410P data into an RGB24 VideoFrame (delegates to fromVideoYUV410P()).
/*! NOTE(review): byteswap is ignored, as for the other planar formats. */
VideoFrame YUV410P_to_RGB24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromVideoYUV410P(sptr, length, dims));
}
00166 
00167 // ######################################################################
//! Coerce 8-bit greyscale data into an RGB24 VideoFrame (delegates to fromMono()).
/*! byteswap is moot for single-byte pixels and is ignored. */
VideoFrame GREY_to_RGB24(const byte* sptr, const size_t length,
                         const Dims& dims, const bool byteswap)
{
  return VideoFrame(fromMono(sptr, length, dims));
}
00173 
00174 
00175 
00176 
00177 // ######################################################################
00178 // ######################################################################
00179 // conversions from RGB24
00180 // ######################################################################
00181 // ######################################################################
00182 
00183 // ######################################################################
00184 VideoFrame RGB24_to_GREY(const byte* sptr, const size_t length,
00185                          const Dims& dims, const bool byteswap)
00186 {
00187   checkBufferLength(length, dims.sz() * 3);
00188 
00189   Image<byte> dst(dims, NO_INIT);
00190 
00191   Image<byte>::iterator dptr = dst.beginw();
00192   Image<byte>::iterator stop = dst.endw();
00193 
00194   while (dptr != stop)
00195     {
00196       // byteswap does not matter here since (R+G+B)/3 == (B+G+R)/3
00197       *dptr++ = byte(0.5 + (sptr[0] + sptr[1] + sptr[2]) / 3.0);
00198       sptr += 3;
00199     }
00200 
00201   return VideoFrame(dst);
00202 }
00203 
00204 // ######################################################################
00205 VideoFrame RGB24_to_RGB555(const byte* sptr, const size_t length,
00206                            const Dims& dims, const bool byteswap)
00207 {
00208   checkBufferLength(length, dims.sz() * 3);
00209 
00210   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 2, 1), NO_INIT));
00211   byte* dptr = dst.uniq().dataw();
00212   byte* stop = dst.uniq().endw();
00213 
00214   if (byteswap)
00215     while (dptr != stop)
00216       {
00217         // data stored as: 1 ignored bit, 5-bit R, 5-bit G, 5-bit B
00218         const int r = sptr[2];
00219         const int g = sptr[1];
00220         const int b = sptr[0];
00221 
00222         const uint16 val = ((r >> 3) << 10) + ((g >> 3) << 5) + (b >> 3);
00223         // big-endian:
00224         *dptr++ = (val & 0xff00) >> 8;
00225         *dptr++ = (val & 0x00ff);
00226         sptr += 3;
00227       }
00228   else
00229     while (dptr != stop)
00230       {
00231         // data stored as: 1 ignored bit, 5-bit R, 5-bit G, 5-bit B
00232         const int r = sptr[0];
00233         const int g = sptr[1];
00234         const int b = sptr[2];
00235 
00236         const uint16 val = ((r >> 3) << 10) + ((g >> 3) << 5) + (b >> 3);
00237         // big-endian:
00238         *dptr++ = (val & 0xff00) >> 8;
00239         *dptr++ = (val & 0x00ff);
00240         sptr += 3;
00241       }
00242 
00243   return VideoFrame(dst, dims, VIDFMT_RGB555, false);
00244 }
00245 
00246 // ######################################################################
00247 VideoFrame RGB24_to_RGB565(const byte* sptr, const size_t length,
00248                            const Dims& dims, const bool byteswap)
00249 {
00250   checkBufferLength(length, dims.sz() * 3);
00251 
00252   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 2, 1), NO_INIT));
00253   byte* dptr = dst.uniq().dataw();
00254   byte* stop = dst.uniq().endw();
00255 
00256   if (byteswap)
00257     while (dptr != stop)
00258       {
00259         // data stored as: 5-bit R, 6-bit G, 5-bit B
00260         const int r = sptr[2];
00261         const int g = sptr[1];
00262         const int b = sptr[0];
00263 
00264         const uint16 val = ((r >> 3) << 11) + ((g >> 2) << 5) + (b >> 3);
00265         // big-endian:
00266         *dptr++ = (val & 0xff00) >> 8;
00267         *dptr++ = (val & 0x00ff);
00268         sptr += 3;
00269       }
00270   else
00271     while (dptr != stop)
00272       {
00273         // data stored as: 5-bit R, 6-bit G, 5-bit B
00274         const int r = sptr[0];
00275         const int g = sptr[1];
00276         const int b = sptr[2];
00277 
00278         const uint16 val = ((r >> 3) << 11) + ((g >> 2) << 5) + (b >> 3);
00279         // big-endian:
00280         *dptr++ = (val & 0xff00) >> 8;
00281         *dptr++ = (val & 0x00ff);
00282         sptr += 3;
00283       }
00284 
00285   return VideoFrame(dst, dims, VIDFMT_RGB565, false);
00286 }
00287 
00288 // ######################################################################
00289 VideoFrame RGB24_to_RGB32(const byte* sptr, const size_t length,
00290                           const Dims& dims, const bool byteswap)
00291 {
00292   checkBufferLength(length, dims.sz() * 3);
00293 
00294   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 4, 1), NO_INIT));
00295   byte* dptr = dst.uniq().dataw();
00296   byte* stop = dst.uniq().endw();
00297 
00298   if (byteswap)
00299     while (dptr != stop)
00300       {
00301         *dptr++ = 0;
00302         *dptr++ = sptr[2];
00303         *dptr++ = sptr[1];
00304         *dptr++ = sptr[0];
00305         sptr += 3;
00306       }
00307   else
00308     while (dptr != stop)
00309       {
00310         *dptr++ = 0;
00311         *dptr++ = sptr[0];
00312         *dptr++ = sptr[1];
00313         *dptr++ = sptr[2];
00314         sptr += 3;
00315       }
00316 
00317   return VideoFrame(dst, dims, VIDFMT_RGB32, false);
00318 }
00319 
00320 // ######################################################################
00321 // ######################################################################
00322 // conversions to YUV24
00323 // ######################################################################
00324 // ######################################################################
00325 
00326 // ######################################################################
00327 VideoFrame GREY_to_YUV24(const byte* sptr, const size_t length,
00328                          const Dims& dims, const bool byteswap)
00329 {
00330   const int sz = dims.sz();
00331 
00332   checkBufferLength(length, sz);
00333 
00334   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00335   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00336 
00337   for (int i = 0; i < sz; ++i)
00338     *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i], sptr[i], sptr[i]));
00339 
00340   ASSERT(dptr == dst.endw());
00341 
00342   return VideoFrame(dst);
00343 }
00344 
00345 // ######################################################################
00346 VideoFrame RGB24_to_YUV24(const byte* sptr, const size_t length,
00347                           const Dims& dims, const bool byteswap)
00348 {
00349   const int sz = dims.sz() * 3;
00350 
00351   checkBufferLength(length, sz);
00352 
00353   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00354   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00355 
00356   if (byteswap)
00357     for (int i = 0; i < sz; i += 3)
00358       *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+2], sptr[i+1], sptr[i]));
00359   else
00360     for (int i = 0; i < sz; i += 3)
00361       *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i], sptr[i+1], sptr[i+2]));
00362 
00363   ASSERT(dptr == dst.endw());
00364 
00365   return VideoFrame(dst);
00366 }
00367 
00368 // ######################################################################
00369 VideoFrame RGB32_to_YUV24(const byte* sptr, const size_t length,
00370                           const Dims& dims, const bool byteswap)
00371 {
00372   const int sz = dims.sz() * 4;
00373 
00374   checkBufferLength(length, sz);
00375 
00376   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00377   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00378 
00379   if (byteswap)
00380     for (int i = 0; i < sz; i += 4)
00381       *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+2], sptr[i+1], sptr[i]));
00382   else
00383     for (int i = 0; i < sz; i += 4)
00384       *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+1], sptr[i+2], sptr[i+3]));
00385 
00386   ASSERT(dptr == dst.endw());
00387 
00388   return VideoFrame(dst);
00389 }
00390 
00391 // ######################################################################
00392 VideoFrame YUV444_to_YUV24(const byte* sptr, const size_t length,
00393                            const Dims& dims, const bool byteswap)
00394 {
00395   const int sz = dims.sz() * 3;
00396 
00397   checkBufferLength(length, sz);
00398 
00399   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00400   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00401 
00402   if (byteswap)
00403     for (int i = 0; i < sz; i += 6)
00404       {
00405         // data stored as: y0, u0, u1, v0, v1, y1
00406         *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i], sptr[i+1], sptr[i+3]));
00407         *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+5], sptr[i+2], sptr[i+4]));
00408       }
00409   else
00410     for (int i = 0; i < sz; i += 6)
00411       {
00412         // data stored as: u0, y0, v0, u1, y1, v1
00413         *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+1], sptr[i+0], sptr[i+2]));
00414         *dptr++ = PixVideoYUV<byte>(PixRGB<double>(sptr[i+4], sptr[i+3], sptr[i+5]));
00415       }
00416 
00417   ASSERT(dptr == dst.endw());
00418 
00419   return VideoFrame(dst);
00420 }
00421 
00422 // ######################################################################
00423 VideoFrame YUYV_to_YUV24(const byte* sptr, const size_t length,
00424                          const Dims& dims, const bool byteswap)
00425 {
00426   const int sz = dims.sz() * 2;
00427 
00428   checkBufferLength(length, sz);
00429 
00430   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00431   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00432   Image<PixVideoYUV<byte> >::iterator stop = dst.endw();
00433 
00434   if (byteswap)
00435     while (dptr != stop)
00436       {
00437         // data stored as: u, y0, v, y1
00438         *dptr++ = PixVideoYUV<byte>(sptr[1], sptr[0], sptr[2]);
00439         *dptr++ = PixVideoYUV<byte>(sptr[3], sptr[0], sptr[2]);
00440         sptr += 4;
00441       }
00442   else
00443     while (dptr != stop)
00444       {
00445         // data stored as:  y0, u, y1, v
00446         *dptr++ = PixVideoYUV<byte>(sptr[0], sptr[1], sptr[3]);
00447         *dptr++ = PixVideoYUV<byte>(sptr[2], sptr[1], sptr[3]);
00448         sptr += 4;
00449       }
00450 
00451   return VideoFrame(dst);
00452 }
00453 
00454 // ######################################################################
//! Coerce packed YUV422 (UYVY) into packed YUV24.
/*! YUV422 is the 16-bit-byteswapped sibling of YUYV, so we reuse
    YUYV_to_YUV24() with the byteswap flag inverted. */
VideoFrame YUV422_to_YUV24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  return YUYV_to_YUV24(sptr, length, dims, !byteswap);
}
00460 
00461 // ######################################################################
//! Coerce packed YUV411 into packed YUV24.
/*! Every 6-byte group carries four luma samples plus one shared u,v
    pair, hence the 3/2 bytes-per-pixel input size. */
VideoFrame YUV411_to_YUV24(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  checkBufferLength(length, dims.sz() * 3 / 2);

  Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
  Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
  Image<PixVideoYUV<byte> >::iterator stop = dst.endw();

  if (byteswap)
    while (dptr != stop)
      {
        // 16-bit byteswapping scrambles each group to: y0, u, v, y1, y3, y2
        // (note the last two luma samples come out in reversed order,
        // which is why sptr[5] is consumed before sptr[4] below)
        *dptr++ = PixVideoYUV<byte>(sptr[0], sptr[1], sptr[2]);
        *dptr++ = PixVideoYUV<byte>(sptr[3], sptr[1], sptr[2]);
        *dptr++ = PixVideoYUV<byte>(sptr[5], sptr[1], sptr[2]);
        *dptr++ = PixVideoYUV<byte>(sptr[4], sptr[1], sptr[2]);
        sptr += 6;
      }
  else
    while (dptr != stop)
      {
        // native groups are: u, y0, y1, v, y2, y3
        *dptr++ = PixVideoYUV<byte>(sptr[1], sptr[0], sptr[3]);
        *dptr++ = PixVideoYUV<byte>(sptr[2], sptr[0], sptr[3]);
        *dptr++ = PixVideoYUV<byte>(sptr[4], sptr[0], sptr[3]);
        *dptr++ = PixVideoYUV<byte>(sptr[5], sptr[0], sptr[3]);
        sptr += 6;
      }

  return VideoFrame(dst);
}
00494 
00495 // ######################################################################
00496 VideoFrame YUV444P_to_YUV24(const byte* sptr, const size_t length,
00497                             const Dims& dims, const bool byteswap)
00498 {
00499   checkBufferLength(length, dims.sz() * 3);
00500 
00501   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00502   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00503   Image<PixVideoYUV<byte> >::iterator stop = dst.endw();
00504 
00505   const byte* yptr = sptr;
00506   const byte* uptr = yptr + dims.sz();
00507   const byte* vptr = uptr + dims.sz();
00508 
00509   if (byteswap)
00510     std::swap(uptr, vptr);
00511 
00512   while (dptr != stop)
00513     *dptr++ = PixVideoYUV<byte>(*yptr++, *uptr++, *vptr++);
00514 
00515   return VideoFrame(dst);
00516 }
00517 
00518 // ######################################################################
00519 VideoFrame YUV422P_to_YUV24(const byte* sptr, const size_t length,
00520                             const Dims& dims, const bool byteswap)
00521 {
00522   const int w = dims.w();
00523   const int h = dims.h();
00524 
00525   checkBufferLength(length, dims.sz() + 2*(w/2)*h);
00526 
00527   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00528   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00529 
00530   const byte* yptr = sptr;
00531   const byte* uptr = yptr + w * h;
00532   const byte* vptr = uptr + (w/2) * h;
00533 
00534   if (byteswap)
00535     std::swap(uptr, vptr);
00536 
00537   for (int j = 0; j < h; ++j)
00538     for (int i = 0; i < w; i += 2)
00539       {
00540         // we have 2 luminance pixels per chroma pair
00541 
00542         const byte yf1 = *yptr++;
00543         const byte yf2 = *yptr++;
00544         const byte uf = *uptr++;
00545         const byte vf = *vptr++;
00546 
00547         *dptr++ = PixVideoYUV<byte>(yf1, uf, vf);
00548         *dptr++ = PixVideoYUV<byte>(yf2, uf, vf);
00549       }
00550   ASSERT(dptr == dst.endw());
00551 
00552   return VideoFrame(dst);
00553 }
00554 
00555 // ######################################################################
00556 VideoFrame YUV411P_to_YUV24(const byte* sptr, const size_t length,
00557                             const Dims& dims, const bool byteswap)
00558 {
00559   const int w = dims.w();
00560   const int h = dims.h();
00561 
00562   checkBufferLength(length, dims.sz() + 2*(w/4)*h);
00563 
00564   Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
00565   Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();
00566 
00567   const byte* yptr = sptr;
00568   const byte* uptr = yptr + w * h;
00569   const byte* vptr = uptr + (w/4) * h;
00570 
00571   if (byteswap)
00572     std::swap(uptr, vptr);
00573 
00574   for (int j = 0; j < h; ++j)
00575     for (int i = 0; i < w; i += 4)
00576       {
00577         // we have a 4 luminance pixels per chroma pair
00578         const byte uf = *uptr++;
00579         const byte vf = *vptr++;
00580 
00581         *dptr++ = PixVideoYUV<byte>(*yptr++, uf, vf);
00582         *dptr++ = PixVideoYUV<byte>(*yptr++, uf, vf);
00583         *dptr++ = PixVideoYUV<byte>(*yptr++, uf, vf);
00584         *dptr++ = PixVideoYUV<byte>(*yptr++, uf, vf);
00585       }
00586   ASSERT(dptr == dst.endw());
00587 
00588   return VideoFrame(dst);
00589 }
00590 
00591 // ######################################################################
//! Coerce planar YUV420P into packed YUV24.
/*! Plane layout: full-resolution Y, then U and V planes each subsampled
    2x both horizontally and vertically. byteswap exchanges the U and V
    planes. NOTE(review): the pixel loops step by 2 and touch yptr[w+1]
    and dptr[w+1], so they effectively assume even width and height even
    though the chroma plane sizes are computed with round-up (w+1)/2 —
    confirm callers only pass even dims. */
VideoFrame YUV420P_to_YUV24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{

  const int w = dims.w();
  const int h = dims.h();
  // we have to do (w+1)/2 instead of just w/2, because if e.g. the y
  // array has 5 pixels, then we want the u and v arrays to have 3
  // pixels, not 2:
  const int w2 = (w+1)/2;
  const int h2 = (h+1)/2;

  checkBufferLength(length, dims.sz() + 2*w2*h2);

  Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
  Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();

  const byte* yptr = sptr;
  const byte* uptr = yptr + w * h;
  const byte* vptr = uptr + w2 * h2;

  if (byteswap)
    std::swap(uptr, vptr);

  for (int j = 0; j < h; j += 2)
    {
      for (int i = 0; i < w; i += 2)
        {
          const byte u = *uptr++;
          const byte v = *vptr++;
          // we have a 2x2 luminance block per chroma pair
          dptr[0] = PixVideoYUV<byte>(yptr[0], u, v);
          dptr[1] = PixVideoYUV<byte>(yptr[1], u, v);
          dptr[w] = PixVideoYUV<byte>(yptr[w], u, v);
          dptr[w+1] = PixVideoYUV<byte>(yptr[w+1], u, v);

          dptr += 2;
          yptr += 2;
        }
      // the inner loop filled two output rows but only advanced the
      // pointers along one, so skip over the second row here:
      dptr += w;
      yptr += w;
    }
  ASSERT(dptr == dst.endw());

  return VideoFrame(dst);
}
00638 
00639 // ######################################################################
//! Coerce planar YUV410P into packed YUV24.
/*! Plane layout: full-resolution Y, then U and V planes each subsampled
    4x both horizontally and vertically (hence the 9/8 total size
    factor). The unrolled loops assume width and height are multiples
    of 4. NOTE(review): unlike the other planar converters, byteswap is
    ignored here (no std::swap(uptr, vptr)) — possibly an oversight;
    verify before relying on byteswapped YUV410P input. */
VideoFrame YUV410P_to_YUV24(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  const int w = dims.w();
  const int h = dims.h();
  // offsets of the 2nd, 3rd and 4th output/luma rows within a block:
  const int w2 = w * 2;
  const int w3 = w * 3;

  checkBufferLength(length, dims.sz() * 9 / 8);

  Image<PixVideoYUV<byte> > dst(dims, NO_INIT);
  Image<PixVideoYUV<byte> >::iterator dptr = dst.beginw();

  const byte* yptr = sptr;
  const byte* uptr = yptr + w * h;
  const byte* vptr = uptr + (w/4) * (h/4);

  for (int j = 0; j < h; j += 4)
    {
      for (int i = 0; i < w; i += 4)
        {
          const byte u = *uptr++;
          const byte v = *vptr++;

          // we have a 4x4 luminance block per chroma pair
          dptr[0] = PixVideoYUV<byte>(*yptr, u, v);
          dptr[1] = PixVideoYUV<byte>(yptr[1], u, v);
          dptr[2] = PixVideoYUV<byte>(yptr[2], u, v);
          dptr[3] = PixVideoYUV<byte>(yptr[3], u, v);

          dptr[w] = PixVideoYUV<byte>(yptr[w], u, v);
          dptr[w+1] = PixVideoYUV<byte>(yptr[w+1], u, v);
          dptr[w+2] = PixVideoYUV<byte>(yptr[w+2], u, v);
          dptr[w+3] = PixVideoYUV<byte>(yptr[w+3], u, v);

          dptr[w2] = PixVideoYUV<byte>(yptr[w2], u, v);
          dptr[w2+1] = PixVideoYUV<byte>(yptr[w2+1], u, v);
          dptr[w2+2] = PixVideoYUV<byte>(yptr[w2+2], u, v);
          dptr[w2+3] = PixVideoYUV<byte>(yptr[w2+3], u, v);

          dptr[w3] = PixVideoYUV<byte>(yptr[w3], u, v);
          dptr[w3+1] = PixVideoYUV<byte>(yptr[w3+1], u, v);
          dptr[w3+2] = PixVideoYUV<byte>(yptr[w3+2], u, v);
          dptr[w3+3] = PixVideoYUV<byte>(yptr[w3+3], u, v);

          dptr += 4;
          yptr += 4;
        }
      // the inner loop filled four output rows but only advanced the
      // pointers along one, so skip over the remaining three rows here:
      dptr += w3;
      yptr += w3;
    }
  ASSERT(dptr == dst.endw());

  return VideoFrame(dst);
}
00695 
00696 // ######################################################################
00697 // ######################################################################
00698 // conversions from YUV24
00699 // ######################################################################
00700 // ######################################################################
00701 
00702 // ######################################################################
00703 VideoFrame YUV24_to_YUV444(const byte* sptr, const size_t length,
00704                            const Dims& dims, const bool byteswap)
00705 {
00706   checkBufferLength(length, dims.sz() * 3);
00707 
00708   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 3, 1), NO_INIT));
00709   byte* dptr = dst.uniq().dataw();
00710   byte* stop = dst.uniq().endw();
00711 
00712   // output data stored as: u0, y0, v0, u1, y1, v1
00713 
00714   if (byteswap)
00715     while (dptr != stop)
00716       {
00717         // input data stored as: y0, v0, u0, y1, v1, u1
00718         dptr[0] = sptr[2];
00719         dptr[1] = sptr[0];
00720         dptr[2] = sptr[1];
00721         dptr[3] = sptr[5];
00722         dptr[4] = sptr[3];
00723         dptr[5] = sptr[4];
00724 
00725         dptr += 6;
00726         sptr += 6;
00727       }
00728   else
00729     while (dptr != stop)
00730       {
00731         // input data stored as: y0, u0, v0, y1, u1, v1
00732         dptr[0] = sptr[1];
00733         dptr[1] = sptr[0];
00734         dptr[2] = sptr[2];
00735         dptr[3] = sptr[4];
00736         dptr[4] = sptr[3];
00737         dptr[5] = sptr[5];
00738 
00739         dptr += 6;
00740         sptr += 6;
00741       }
00742 
00743   return VideoFrame(dst, dims, VIDFMT_YUV444, false);
00744 }
00745 
00746 // ######################################################################
00747 VideoFrame YUV24_to_YUYV(const byte* sptr, const size_t length,
00748                          const Dims& dims, const bool byteswap)
00749 {
00750   checkBufferLength(length, dims.sz() * 3);
00751 
00752   ASSERT(dims.w() % 2 == 0);
00753 
00754   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 2, 1), NO_INIT));
00755   byte* dptr = dst.uniq().dataw();
00756   byte* stop = dst.uniq().endw();
00757 
00758   // output data stored as: y0, u, y1, v
00759 
00760   if (byteswap)
00761     while (dptr != stop)
00762       {
00763         // input data stored as: y0, v0, u0, y1, v1, u1
00764         dptr[0] = sptr[0];
00765         dptr[1] = byte((sptr[2] + sptr[5])/2.0 + 0.5);
00766         dptr[2] = sptr[3];
00767         dptr[3] = byte((sptr[1] + sptr[4])/2.0 + 0.5);
00768 
00769         dptr += 4;
00770         sptr += 6;
00771       }
00772   else
00773     while (dptr != stop)
00774       {
00775         // input data stored as: y0, u0, v0, y1, u1, v1
00776         dptr[0] = sptr[0];
00777         dptr[1] = byte((sptr[1] + sptr[4])/2.0 + 0.5);
00778         dptr[2] = sptr[3];
00779         dptr[3] = byte((sptr[2] + sptr[5])/2.0 + 0.5);
00780 
00781         dptr += 4;
00782         sptr += 6;
00783       }
00784 
00785   return VideoFrame(dst, dims, VIDFMT_YUYV, false);
00786 }
00787 
00788 // ######################################################################
00789 ArrayHandle<byte> YUV24_to_UYVYx(const byte* sptr, const size_t length,
00790                                  const Dims& dims, const bool byteswap)
00791 {
00792   checkBufferLength(length, dims.sz() * 3);
00793 
00794   ASSERT(dims.w() % 2 == 0);
00795 
00796   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 2, 1), NO_INIT));
00797   byte* dptr = dst.uniq().dataw();
00798   byte* stop = dst.uniq().endw();
00799 
00800   // output data stored as: u, y0, v, y1
00801 
00802   if (byteswap)
00803     while (dptr != stop)
00804       {
00805         // input data stored as: y0, v0, u0, y1, v1, u1
00806         dptr[0] = byte((sptr[2] + sptr[5])/2.0 + 0.5);
00807         dptr[1] = sptr[0];
00808         dptr[2] = byte((sptr[1] + sptr[4])/2.0 + 0.5);
00809         dptr[3] = sptr[3];
00810 
00811         dptr += 4;
00812         sptr += 6;
00813       }
00814   else
00815     while (dptr != stop)
00816       {
00817         // input data stored as: y0, u0, v0, y1, u1, v1
00818         dptr[0] = byte((sptr[1] + sptr[4])/2.0 + 0.5);
00819         dptr[1] = sptr[0];
00820         dptr[2] = byte((sptr[2] + sptr[5])/2.0 + 0.5);
00821         dptr[3] = sptr[3];
00822 
00823         dptr += 4;
00824         sptr += 6;
00825       }
00826 
00827   return dst;
00828 }
00829 
00830 // ######################################################################
00831 VideoFrame YUV24_to_UYVY(const byte* sptr, const size_t length,
00832                          const Dims& dims, const bool byteswap)
00833 {
00834   return VideoFrame(YUV24_to_UYVYx(sptr, length, dims, byteswap),
00835                     dims, VIDFMT_UYVY, false);
00836 }
00837 
00838 // ######################################################################
00839 VideoFrame YUV24_to_YUV422(const byte* sptr, const size_t length,
00840                            const Dims& dims, const bool byteswap)
00841 {
00842   return VideoFrame(YUV24_to_UYVYx(sptr, length, dims, byteswap),
00843                     dims, VIDFMT_YUV422, false);
00844 }
00845 
00846 // ######################################################################
//! Convert interleaved YUV 4:4:4 to interleaved YUV 4:1:1
/*! Each output group (u, y0, y1, v, y2, y3) covers four horizontally
    adjacent input pixels; the four chroma samples of a group are
    averaged with rounding. With byteswap set, the input chroma bytes
    arrive in V,U order instead of U,V. */
VideoFrame YUV24_to_YUV411(const byte* sptr, const size_t length,
                           const Dims& dims, const bool byteswap)
{
  checkBufferLength(length, dims.sz() * 3);

  // groups of four horizontally-adjacent pixels share chroma
  ASSERT(dims.w() % 4 == 0);

  // 6 output bytes per 4 input pixels = dims.sz() * 3/2 bytes total
  ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 3 / 2, 1), NO_INIT));
  byte* dptr = dst.uniq().dataw();
  byte* stop = dst.uniq().endw();

  // output data stored as: u, y0, y1, v, y2, y3

  if (byteswap)
    while (dptr != stop)
      {
        // input data stored as: y0, v0, u0, y1, v1, u1, y2, v2, u2, y3, v3, u3
        dptr[0] = byte((sptr[2] + sptr[5] + sptr[8] + sptr[11])/4.0 + 0.5);
        dptr[1] = sptr[0];
        dptr[2] = sptr[3];
        dptr[3] = byte((sptr[1] + sptr[4] + sptr[7] + sptr[10])/4.0 + 0.5);
        dptr[4] = sptr[6];
        dptr[5] = sptr[9];

        dptr += 6;
        sptr += 12;
      }
  else
    while (dptr != stop)
      {
        // input data stored as: y0, u0, v0, y1, u1, v1, y2, u2, v2, y3, u3, v3
        dptr[0] = byte((sptr[1] + sptr[4] + sptr[7] + sptr[10])/4.0 + 0.5);
        dptr[1] = sptr[0];
        dptr[2] = sptr[3];
        dptr[3] = byte((sptr[2] + sptr[5] + sptr[8] + sptr[11])/4.0 + 0.5);
        dptr[4] = sptr[6];
        dptr[5] = sptr[9];

        dptr += 6;
        sptr += 12;
      }

  return VideoFrame(dst, dims, VIDFMT_YUV411, false);
}
00891 
00892 // ######################################################################
00893 VideoFrame YUV24_to_YUV444P(const byte* sptr, const size_t length,
00894                             const Dims& dims, const bool byteswap)
00895 {
00896   checkBufferLength(length, dims.sz() * 3);
00897 
00898   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 3, 1), NO_INIT));
00899   byte* yptr = dst.uniq().dataw();
00900   byte* uptr = yptr + dims.sz();
00901   byte* vptr = uptr + dims.sz();
00902 
00903   if (byteswap)
00904     std::swap(uptr, vptr);
00905 
00906   const byte* sstop = sptr + length;
00907 
00908   while (sptr != sstop)
00909     {
00910       *yptr++ = *sptr++;
00911       *uptr++ = *sptr++;
00912       *vptr++ = *sptr++;
00913     }
00914 
00915   return VideoFrame(dst, dims, VIDFMT_YUV444P, false);
00916 }
00917 
00918 // ######################################################################
00919 VideoFrame YUV24_to_YUV422P(const byte* sptr, const size_t length,
00920                             const Dims& dims, const bool byteswap)
00921 {
00922   checkBufferLength(length, dims.sz() * 3);
00923 
00924   ASSERT(dims.w() % 2 == 0);
00925 
00926   const int w = dims.w();
00927   const int h = dims.h();
00928 
00929   ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() + 2 * (w/2)*h, 1), NO_INIT));
00930   byte* yptr = dst.uniq().dataw();
00931   byte* uptr = yptr + w * h;
00932   byte* vptr = uptr + (w/2) * h;
00933 
00934   if (byteswap)
00935     std::swap(uptr, vptr);
00936 
00937   const byte* sstop = sptr + length;
00938 
00939   while (sptr != sstop)
00940     {
00941       *yptr++ = sptr[0];
00942       *yptr++ = sptr[3];
00943       *uptr++ = byte((sptr[1]+sptr[4])/2.0 + 0.5);
00944       *vptr++ = byte((sptr[2]+sptr[5])/2.0 + 0.5);
00945       sptr += 6;
00946     }
00947 
00948   return VideoFrame(dst, dims, VIDFMT_YUV422P, false);
00949 }
00950 
00951 // ######################################################################
//! Convert interleaved YUV 4:4:4 to planar YUV 4:1:1
/*! The U and V planes are quarter-width: each group of four
    horizontally adjacent pixels shares one chroma sample, computed as
    the rounded average of the group's four chroma samples. With
    byteswap set, the input chroma bytes arrive in V,U order, which is
    handled by swapping the chroma plane pointers. */
VideoFrame YUV24_to_YUV411P(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  checkBufferLength(length, dims.sz() * 3);

  // chroma is subsampled 4x horizontally, so width must be a multiple of 4
  ASSERT(dims.w() % 4 == 0);

  const int w = dims.w();
  const int h = dims.h();

  // full-res Y plane plus two quarter-width chroma planes
  ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() + 2 * (w/4)*h, 1), NO_INIT));
  byte* yptr = dst.uniq().dataw();
  byte* uptr = yptr + w * h;
  byte* vptr = uptr + (w/4) * h;

  if (byteswap)
    std::swap(uptr, vptr);

  const byte* sstop = sptr + length;

  // consume four input pixels (12 bytes) per iteration
  while (sptr != sstop)
    {
      *yptr++ = sptr[0];
      *yptr++ = sptr[3];
      *yptr++ = sptr[6];
      *yptr++ = sptr[9];
      *uptr++ = byte((sptr[1]+sptr[4]+sptr[7]+sptr[10])/4.0 + 0.5);
      *vptr++ = byte((sptr[2]+sptr[5]+sptr[8]+sptr[11])/4.0 + 0.5);
      sptr += 12;
    }

  return VideoFrame(dst, dims, VIDFMT_YUV411P, false);
}
00985 
00986 // ######################################################################
//! Convert interleaved YUV 4:4:4 to planar YUV 4:2:0
/*! Chroma is subsampled 2x both horizontally and vertically: each 2x2
    block of pixels shares one U and one V sample, computed as the
    rounded average of the block's four chroma samples. With byteswap
    set, the input chroma bytes arrive in V,U order, which is handled
    by swapping the chroma plane pointers. */
VideoFrame YUV24_to_YUV420P(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  checkBufferLength(length, dims.sz() * 3);

  ASSERT(dims.w() % 2 == 0);
  ASSERT(dims.h() % 2 == 0);

  const int w = dims.w();
  const int h = dims.h();

  // chroma plane dimensions; with the evenness asserted above these
  // are exactly w/2 and h/2
  const int w2 = (w+1)/2;
  const int h2 = (h+1)/2;

  ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() + 2*w2*h2, 1), NO_INIT));
  byte* yptr = dst.uniq().dataw();
  byte* uptr = yptr + w * h;
  byte* vptr = uptr + w2 * h2;

  if (byteswap)
    std::swap(uptr, vptr);

  const byte* sstop = sptr + length;

  // walk the image in 2x2 blocks: within a row pair, yptr/sptr advance
  // two pixels at a time, then skip over the second (already written)
  // row at the end of the pair
  for (int j = 0; j < h; j += 2)
    {
      for (int i = 0; i < w; i += 2)
        {
          // two luma samples from the current row, two from the next
          // (offsets of 3*w bytes reach the same column one row down)
          yptr[0] = sptr[0];
          yptr[1] = sptr[3];
          yptr[w] = sptr[3*w];
          yptr[w+1] = sptr[3*w+3];
          *uptr++ = byte((sptr[1] + sptr[4] + sptr[3*w+1] + sptr[3*w+4])/4.0 + 0.5);
          *vptr++ = byte((sptr[2] + sptr[5] + sptr[3*w+2] + sptr[3*w+5])/4.0 + 0.5);

          yptr += 2;
          sptr += 6;
        }

      yptr += w;
      sptr += 3*w;
    }

  // we should have consumed the input buffer exactly
  ASSERT(sptr == sstop);

  return VideoFrame(dst, dims, VIDFMT_YUV420P, false);
}
01034 
01035 // ######################################################################
//! Convert interleaved YUV 4:4:4 to planar YUV 4:1:0
/*! Chroma is subsampled 4x both horizontally and vertically: each 4x4
    block of pixels shares one U and one V sample, computed as the
    rounded average of the block's sixteen chroma samples. With
    byteswap set, the input chroma bytes arrive in V,U order, which is
    handled by swapping the chroma plane pointers. */
VideoFrame YUV24_to_YUV410P(const byte* sptr, const size_t length,
                            const Dims& dims, const bool byteswap)
{
  checkBufferLength(length, dims.sz() * 3);

  ASSERT(dims.w() % 4 == 0);
  ASSERT(dims.h() % 4 == 0);

  const int w = dims.w();
  const int h = dims.h();

  // full-res Y plane plus two (w/4)x(h/4) chroma planes = 9/8 of w*h
  ArrayHandle<byte> dst(new ArrayData<byte>(Dims(dims.sz() * 9 / 8, 1), NO_INIT));
  byte* yptr = dst.uniq().dataw();
  byte* uptr = yptr + w * h;
  byte* vptr = uptr + (w/4) * (h/4);

  if (byteswap)
    std::swap(uptr, vptr);

  const byte* sstop = sptr + length;

  // walk the image in 4x4 blocks: within a band of four rows, yptr and
  // sptr advance four pixels at a time, then skip the three extra
  // (already written) rows at the end of the band
  for (int j = 0; j < h; j += 4)
    {
      for (int i = 0; i < w; i += 4)
        {
          // copy the block's 16 luma samples; offsets of 3*w, 6*w, 9*w
          // bytes in the 3-byte/pixel input reach the same column on
          // the next three rows
          yptr[0] = sptr[0];
          yptr[1] = sptr[3];
          yptr[2] = sptr[6];
          yptr[3] = sptr[9];
          yptr[w] = sptr[3*w];
          yptr[w+1] = sptr[3*w+3];
          yptr[w+2] = sptr[3*w+6];
          yptr[w+3] = sptr[3*w+9];
          yptr[2*w] = sptr[6*w];
          yptr[2*w+1] = sptr[6*w+3];
          yptr[2*w+2] = sptr[6*w+6];
          yptr[2*w+3] = sptr[6*w+9];
          yptr[3*w] = sptr[9*w];
          yptr[3*w+1] = sptr[9*w+3];
          yptr[3*w+2] = sptr[9*w+6];
          yptr[3*w+3] = sptr[9*w+9];

          // average the block's 16 U samples and 16 V samples
          *uptr++ = byte((sptr[1] + sptr[4] + sptr[7] + sptr[10] +
                          sptr[3*w+1] + sptr[3*w+4] + sptr[3*w+7] + sptr[3*w+10] +
                          sptr[6*w+1] + sptr[6*w+4] + sptr[6*w+7] + sptr[6*w+10] +
                          sptr[9*w+1] + sptr[9*w+4] + sptr[9*w+7] + sptr[9*w+10])
                         / 16.0 + 0.5);
          *vptr++ = byte((sptr[2] + sptr[5] + sptr[8] + sptr[11] +
                          sptr[3*w+2] + sptr[3*w+5] + sptr[3*w+8] + sptr[3*w+11] +
                          sptr[6*w+2] + sptr[6*w+5] + sptr[6*w+8] + sptr[6*w+11] +
                          sptr[9*w+2] + sptr[9*w+5] + sptr[9*w+8] + sptr[9*w+11])
                         / 16.0 + 0.5);

          yptr += 4;
          sptr += 12;
        }

      yptr += 3*w;
      sptr += 9*w;
    }

  // we should have consumed the input buffer exactly
  ASSERT(sptr == sstop);

  return VideoFrame(dst, dims, VIDFMT_YUV410P, false);
}
01101 
01102 // ######################################################################
01103 // ######################################################################
01104 // VideoFormatConverter
01105 // ######################################################################
01106 // ######################################################################
01107 
01108 // ######################################################################
//! Build an atomic converter: function f converts format s to format d
/*! @param f conversion function (may be NULL; apply() refuses to run
      such a converter)
    @param w edge weight used by the shortest-path search over
      conversion sequences
    @param s source pixel format expected by f
    @param d destination pixel format produced by f
    @param fn human-readable function name, used in diagnostics */
VideoFormatConverter::VideoFormatConverter(VideoFormatConverter::Func* f,
                                           unsigned int w,
                                           VideoFormat s, VideoFormat d,
                                           const char* fn)
  : func(f), weight(w), src(s), dst(d), fname(fn)
{}
01115 
01116 // ######################################################################
//! Run this atomic conversion on one frame
/*! LFATALs if the converter holds a null function or if the input
    frame's pixel format does not match this converter's expected
    source format; asserts that the conversion function produced the
    advertised destination format. */
VideoFrame VideoFormatConverter::apply(const VideoFrame& in) const
{
  if (func == NULL)
    LFATAL("oops! this is an invalid VideoFormatConverter");

  // refuse frames whose format doesn't match our declared source
  if (in.getMode() != this->src)
    LFATAL("oops! src frame is %s, but converter (%s) expects %s",
           convertToString(in.getMode()).c_str(),
           this->fname.c_str(),
           convertToString(this->src).c_str());

  const VideoFrame result =
    (*this->func)(in.getBuffer(), in.getBufSize(),
                  in.getDims(), in.getByteSwap());

  // the conversion function must deliver our declared destination format
  ASSERT(result.getMode() == this->dst);

  return result;
}
01136 
01137 
01138 // ######################################################################
01139 // ######################################################################
01140 // VideoFormatCoercion
01141 // ######################################################################
01142 // ######################################################################
01143 
01144 // ######################################################################
//! Default-construct an empty coercion (no conversion steps; see describe()/apply())
VideoFormatCoercion::VideoFormatCoercion()
{}
01147 
01148 // ######################################################################
01149 VideoFormatCoercion::VideoFormatCoercion(unsigned int w,
01150                                          VideoFormat s, VideoFormat d,
01151                                          VideoFormatConverter::Func* f,
01152                                          const char* fname)
01153 {
01154   if (f != NULL)
01155     nodes.push_back(VideoFormatConverter(f, w, s, d, fname));
01156 }
01157 
01158 // ######################################################################
01159 std::string VideoFormatCoercion::describe() const
01160 {
01161   if (nodes.empty())
01162     return std::string("nil");
01163 
01164   unsigned int w = 0;
01165   for (size_t i = 0; i < nodes.size(); ++i)
01166     w += nodes[i].weight;
01167 
01168   std::string result = sformat("%s -> %s [wt=%u] (",
01169                                convertToString(nodes.front().src).c_str(),
01170                                convertToString(nodes.back().dst).c_str(),
01171                                w);
01172 
01173   for (size_t i = 0; i < nodes.size(); ++i)
01174     {
01175       result += sformat("%s [wt=%u]",
01176                         nodes[i].fname.c_str(), nodes[i].weight);
01177 
01178       if (i+1 < nodes.size())
01179         result += "; ";
01180     }
01181 
01182   result += ")";
01183   return result;
01184 }
01185 
01186 // ######################################################################
01187 VideoFrame VideoFormatCoercion::apply(const VideoFrame& src) const
01188 {
01189   if (nodes.size() == 0)
01190     LFATAL("oops! this is an invalid VideoFormat converter");
01191 
01192   VideoFrame result = src;
01193   for (size_t i = 0; i < nodes.size(); ++i)
01194     result = nodes[i].apply(result);
01195 
01196   return result;
01197 }
01198 
01199 
01200 // ######################################################################
01201 // ######################################################################
01202 // conversion table
01203 // ######################################################################
01204 // ######################################################################
01205 
// table of conversion paths, indexed as [source format][destination
// format]; filled in lazily on the first call to findConverter()
static VideoFormatCoercion pathtab[VIDFMT_AUTO+1][VIDFMT_AUTO+1];

// edge-weight penalties for the shortest-path search; each tier is
// 100x the previous one, so a single "worse" kind of loss always
// outweighs any realistic number of "better" ones
static const unsigned int PRECISION_PENALTY = 100; // loss of pixel bit depth (e.g. rgb24->rgb565)
static const unsigned int RESOLUTION_PENALTY = 10000; // loss of spatial resolution (e.g. yuv24 -> yuv411)
static const unsigned int COLORSPACE_PENALTY = 1000000; // lossy colorspace conversion (e.g. rgb->yuv)
static const unsigned int COLORDIMENSION_PENALTY = 100000000; // reduction in the number of color planes (e.g., rgb->grey)
01212 
//! Register every directly-implemented (single-step) conversion
/*! Populates table with the atomic conversion functions defined above;
    pairs left untouched remain empty coercions, to be filled in (where
    reachable) by initIndirectConversions(). The CONVERT macro also
    stringizes the function expression (#f) so each entry carries its
    own diagnostic name. */
static void initDirectConversions(VideoFormatCoercion table[VIDFMT_AUTO+1][VIDFMT_AUTO+1])
{
#define CONVERT(vf1, vf2, wt, f)                                \
  table[vf1][vf2] = VideoFormatCoercion(wt, vf1, vf2, f, #f)

  // conversions to RGB24
  CONVERT(VIDFMT_GREY,      VIDFMT_RGB24, 1, &GREY_to_RGB24);
  CONVERT(VIDFMT_RGB555,    VIDFMT_RGB24, 1, &RGB555_to_RGB24);
  CONVERT(VIDFMT_RGB565,    VIDFMT_RGB24, 1, &RGB565_to_RGB24);
  CONVERT(VIDFMT_RGB32,     VIDFMT_RGB24, 1, &ARGB_to_RGB24);
  CONVERT(VIDFMT_YUYV,      VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUYV_to_RGB24);
  CONVERT(VIDFMT_UYVY,      VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV422_to_RGB24);
  CONVERT(VIDFMT_YUV444,    VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV444_to_RGB24);
  CONVERT(VIDFMT_YUV422,    VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV422_to_RGB24);
  CONVERT(VIDFMT_YUV411,    VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV411_to_RGB24);
  //CONVERT(VIDFMT_YUV420,    VIDFMT_RGB24, 1+COLORSPACE_PENALTY, NULL); /* not implemented: what is YUV420? */
  //CONVERT(VIDFMT_YUV410,    VIDFMT_RGB24, 1+COLORSPACE_PENALTY, NULL); /* not implemented: what is YUV410? */
  CONVERT(VIDFMT_YUV444P,   VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV444P_to_RGB24);
  CONVERT(VIDFMT_YUV422P,   VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV422P_to_RGB24);
  CONVERT(VIDFMT_YUV411P,   VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV411P_to_RGB24);
  CONVERT(VIDFMT_YUV420P,   VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV420P_to_RGB24);
  CONVERT(VIDFMT_YUV410P,   VIDFMT_RGB24, 1+COLORSPACE_PENALTY, &YUV410P_to_RGB24);

  // conversions from RGB24
  CONVERT(VIDFMT_RGB24, VIDFMT_GREY,      1+COLORDIMENSION_PENALTY, &RGB24_to_GREY);
  CONVERT(VIDFMT_RGB24, VIDFMT_RGB555,    1+PRECISION_PENALTY, &RGB24_to_RGB555);
  CONVERT(VIDFMT_RGB24, VIDFMT_RGB565,    1+PRECISION_PENALTY, &RGB24_to_RGB565);
  CONVERT(VIDFMT_RGB24, VIDFMT_RGB32,     1, &RGB24_to_RGB32);
  CONVERT(VIDFMT_RGB24, VIDFMT_YUV24,     1+COLORSPACE_PENALTY, &RGB24_to_YUV24);
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUYV,      1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_UYVY,      1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV444,    1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV422,    1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV411,    1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV420,    1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* not implemented: what is YUV420? */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV410,    1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* not implemented: what is YUV410? */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV444P,   1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV422P,   1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV411P,   1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV420P,   1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB24, VIDFMT_YUV410P,   1+COLORSPACE_PENALTY+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */

  // conversions to YUV24
  CONVERT(VIDFMT_GREY,      VIDFMT_YUV24, 1+COLORSPACE_PENALTY, &GREY_to_YUV24);
  //CONVERT(VIDFMT_RGB555,    VIDFMT_YUV24, 1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_RGB565,    VIDFMT_YUV24, 1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  CONVERT(VIDFMT_RGB24,     VIDFMT_YUV24, 1+COLORSPACE_PENALTY, &RGB24_to_YUV24);
  CONVERT(VIDFMT_RGB32,     VIDFMT_YUV24, 1+COLORSPACE_PENALTY, &RGB32_to_YUV24);
  CONVERT(VIDFMT_YUYV,      VIDFMT_YUV24, 1, &YUYV_to_YUV24);
  CONVERT(VIDFMT_UYVY,      VIDFMT_YUV24, 1, &YUV422_to_YUV24);
  CONVERT(VIDFMT_YUV444,    VIDFMT_YUV24, 1, &YUV444_to_YUV24);
  CONVERT(VIDFMT_YUV422,    VIDFMT_YUV24, 1, &YUV422_to_YUV24);
  CONVERT(VIDFMT_YUV411,    VIDFMT_YUV24, 1, &YUV411_to_YUV24);
  //CONVERT(VIDFMT_YUV420,    VIDFMT_YUV24, 1, NULL); /* not implemented: what is YUV420? */
  //CONVERT(VIDFMT_YUV410,    VIDFMT_YUV24, 1, NULL); /* not implemented: what is YUV410? */
  CONVERT(VIDFMT_YUV444P,   VIDFMT_YUV24, 1, &YUV444P_to_YUV24);
  CONVERT(VIDFMT_YUV422P,   VIDFMT_YUV24, 1, &YUV422P_to_YUV24);
  CONVERT(VIDFMT_YUV411P,   VIDFMT_YUV24, 1, &YUV411P_to_YUV24);
  CONVERT(VIDFMT_YUV420P,   VIDFMT_YUV24, 1, &YUV420P_to_YUV24);
  CONVERT(VIDFMT_YUV410P,   VIDFMT_YUV24, 1, &YUV410P_to_YUV24);

  // conversions from YUV24
  //CONVERT(VIDFMT_YUV24, VIDFMT_GREY,      1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_YUV24, VIDFMT_RGB555,    1+COLORSPACE_PENALTY+PRECISION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_YUV24, VIDFMT_RGB565,    1+COLORSPACE_PENALTY+PRECISION_PENALTY, NULL); /* could be implemented in the future */
  CONVERT(VIDFMT_YUV24, VIDFMT_RGB24,     1+COLORSPACE_PENALTY, &YUV24_to_RGB24);
  //CONVERT(VIDFMT_YUV24, VIDFMT_RGB32,     1+COLORSPACE_PENALTY, NULL); /* could be implemented in the future */
  CONVERT(VIDFMT_YUV24, VIDFMT_YUYV,      1+RESOLUTION_PENALTY, &YUV24_to_YUYV);
  CONVERT(VIDFMT_YUV24, VIDFMT_UYVY,      1+RESOLUTION_PENALTY, &YUV24_to_UYVY);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV444,    1, &YUV24_to_YUV444);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV422,    1+RESOLUTION_PENALTY, &YUV24_to_YUV422);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV411,    1+RESOLUTION_PENALTY, &YUV24_to_YUV411);
  //CONVERT(VIDFMT_YUV24, VIDFMT_YUV420,    1+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  //CONVERT(VIDFMT_YUV24, VIDFMT_YUV410,    1+RESOLUTION_PENALTY, NULL); /* could be implemented in the future */
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV444P,   1, &YUV24_to_YUV444P);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV422P,   1+RESOLUTION_PENALTY, &YUV24_to_YUV422P);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV411P,   1+RESOLUTION_PENALTY, &YUV24_to_YUV411P);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV420P,   1+RESOLUTION_PENALTY, &YUV24_to_YUV420P);
  CONVERT(VIDFMT_YUV24, VIDFMT_YUV410P,   1+RESOLUTION_PENALTY, &YUV24_to_YUV410P);

#undef CONVERT
}
01295 
01296 namespace
01297 {
01298   struct Heap
01299   {
01300     int deletemin()
01301     {
01302       ASSERT(!h.empty());
01303 
01304       unsigned int best = h[0].second;
01305       size_t bestpos = 0;
01306 
01307       for (size_t i = 1; i < h.size(); ++i)
01308         if (h[i].second < best)
01309           {
01310             best = h[i].second;
01311             bestpos = i;
01312           }
01313 
01314       int result = h[bestpos].first;
01315       h.erase(h.begin() + bestpos);
01316       return result;
01317     }
01318 
01319     void decreasekey(int v, unsigned int dist)
01320     {
01321       for (size_t i = 0; i < h.size(); ++i)
01322         if (h[i].first == v)
01323           {
01324             h[i].second = dist;
01325             return;
01326           }
01327     }
01328 
01329     std::vector<std::pair<int, unsigned int> > h;
01330   };
01331 }
01332 
//! Fill in multi-step conversions for every (src,dst) format pair
/*! Runs Dijkstra's shortest-path algorithm from each source format,
    treating each single-node (direct) coercion already in the table as
    a weighted edge, and stores the cheapest chain of converters for
    every reachable destination. */
void initIndirectConversions(VideoFormatCoercion table[VIDFMT_AUTO+1][VIDFMT_AUTO+1])
{
  // Dijkstra's shortest-path algorithm to find the optimal sequence
  // of atomic conversions to achieve an arbitrary coercion

  for (int src = 0; src <= VIDFMT_AUTO; ++src)
    {
      // dist[u]: best known cost from src to u; prev[u]: predecessor
      // of u on that best path (-1 = none found yet)
      unsigned int dist[VIDFMT_AUTO+1];
      int prev[VIDFMT_AUTO+1];

      for (int u = 0; u <= VIDFMT_AUTO; ++u)
        {
          // UINT_MAX plays the role of "infinity" (unreachable)
          dist[u] = std::numeric_limits<unsigned int>::max();
          prev[u] = -1;
        }
      dist[src] = 0;

      // seed the queue with the direct-edge costs out of src
      Heap H;
      for (int u = 0; u <= VIDFMT_AUTO; ++u)
        if (u == src)
          H.h.push_back(std::make_pair(u, 0));
        else if (table[src][u].isDirect())
          H.h.push_back(std::make_pair(u, table[src][u].nodes[0].weight));
        else
          H.h.push_back(std::make_pair(u, std::numeric_limits<unsigned int>::max()));

      while (!H.h.empty())
        {
          const int u = H.deletemin();
          for (int v = 0; v <= VIDFMT_AUTO; ++v)
            {
              // only single-node (direct) entries act as graph edges;
              // multi-node chains written into earlier rows by previous
              // src iterations are skipped here.
              // NOTE(review): if a row's direct entry were ever
              // replaced by a cheaper multi-node chain, that edge would
              // vanish for later src rows -- looks impossible with the
              // current penalty weights, but worth confirming
              if (table[u][v].nodes.size() != 1)
                continue;

              // compare in double so that "infinity" plus an edge
              // weight cannot wrap around in unsigned arithmetic
              if (double(dist[v]) > double(dist[u]) + double(table[u][v].nodes[0].weight))
                {
                  dist[v] = dist[u] + table[u][v].nodes[0].weight;
                  prev[v] = u;

                  H.decreasekey(v, dist[v]);
                }
            }
        }

      // walk each predecessor chain backwards from the destination,
      // prepending converters so the chain ends up in forward order
      for (int u = 0; u <= VIDFMT_AUTO; ++u)
        {
          if (u != src && prev[u] != -1)
            {
              VideoFormatCoercion p;
              int v = u;
              while (v != src)
                {
                  ASSERT(table[prev[v]][v].isDirect());
                  p.nodes.push_front(table[prev[v]][v].nodes[0]);
                  v = prev[v];
                }

              table[src][u] = p;
            }
        }
    }
}
01395 
01396 // ######################################################################
//! Look up the coercion from srcformat to dstformat in the global table
/*! The returned coercion may be empty (nodes.empty()) when no
    conversion path exists between the two formats. */
const VideoFormatCoercion& findConverter(const VideoFormat srcformat,
                                         const VideoFormat dstformat)
{
  // lazily build the conversion table on first use
  // NOTE(review): this lazy init is not guarded by a lock, so it looks
  // unsafe if the first calls can come from multiple threads -- confirm
  // single-threaded first use
  static bool inited = false;
  if (!inited)
    {
      initDirectConversions(pathtab);
      initIndirectConversions(pathtab);
      inited = true;
    }

  // both formats must be valid indices into pathtab
  ASSERT(srcformat >= 0);
  ASSERT(srcformat <= VIDFMT_AUTO);
  ASSERT(dstformat >= 0);
  ASSERT(dstformat <= VIDFMT_AUTO);

  return pathtab[srcformat][dstformat];
}
01415 
01416 // ######################################################################
01417 VideoFrame coerceVideoFormat(const VideoFrame& src,
01418                              const VideoFormat dstformat)
01419 {
01420   if (src.getMode() == dstformat)
01421     return src;
01422 
01423   const VideoFormatCoercion& c = findConverter(src.getMode(), dstformat);
01424 
01425   return c.apply(src);
01426 }
01427 
01428 // ######################################################################
//! Log the full coercion table in a compact matrix form
/*! Column headers are the format names printed vertically (7 rows
    tall); each matrix cell shows 0 on the diagonal (identity), '.'
    when no conversion path exists, or the number of atomic steps in
    the chosen path. */
void printCoercionTable()
{
  // print the column headers vertically, last character on the bottom row
  for (size_t n = 7; n-- > 0; )
    {
      std::string line = sformat("%7s ", "");

      for (int i = 0; i <= VIDFMT_AUTO; ++i)
        {
          const std::string m = convertToString(VideoFormat(i));
          if (n < m.size())
            line += sformat(" %c", m[m.size() - 1 - n]);
          else
            line += "  ";
        }

      LINFO("%s", line.c_str());
    }

  // separator row between the headers and the matrix body
  {
    std::string line = sformat("%7s ", "");
    for (int i = 0; i <= VIDFMT_AUTO; ++i)
      line += " -";

    LINFO("%s", line.c_str());
  }

  // one row per source format: path length to each destination format
  for (int i = 0; i <= VIDFMT_AUTO; ++i)
    {
      std::string line =
        sformat("%7s|", convertToString(VideoFormat(i)).c_str());
      for (int j = 0; j <= VIDFMT_AUTO; ++j)
        if (i == j)
          line += " 0";
        else
          {
            const VideoFormatCoercion& c =
              findConverter(VideoFormat(i), VideoFormat(j));
            if (c.nodes.empty())
              line += " .";
            else
              line += sformat(" %d", int(c.nodes.size()));
          }

      LINFO("%s", line.c_str());
    }
}
01475 
01476 // ######################################################################
01477 /* So things look consistent in everyone's emacs... */
01478 /* Local Variables: */
01479 /* mode: c++ */
01480 /* indent-tabs-mode: nil */
01481 /* End: */
01482 
01483 #endif // VIDEO_VIDEOFORMATCOERCION_C_DEFINED
Generated on Sun May 8 08:07:05 2011 for iLab Neuromorphic Vision Toolkit by  doxygen 1.6.3