DepthMotionChannel.C

/*!@file Channels/DepthMotionChannel.C A motion channel operating on depth images. */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Laurent Itti
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Channels/DepthMotionChannel.C $
// $Id: DepthMotionChannel.C 14293 2010-12-02 01:57:25Z itti $

#include "Channels/DepthMotionChannel.H"

#include "Channels/DirectionChannel.H" // for the DirectionChannel subchannels
#include "Channels/OrientationChannel.H"
#include "Channels/FlickerChannel.H"
#include "Channels/IntensityChannel.H"
#include "Channels/ChannelOpts.H"
#include "Component/OptionManager.H"
#include "Image/ColorOps.H"
#include "Image/PyramidOps.H" // for buildPyrGaussian()
#include "rutz/mutex.h"
#include "rutz/trace.h"


DepthMotionChannel::DepthMotionChannel(OptionManager& mgr) :
  ComplexChannel(mgr, "DepthMotion", "DepthMotion", DEPTHMOTION),
  itsLevelSpec(&OPT_LevelSpec, this),
  itsPyrType("MotionChannelPyramidType", this, Gaussian5),
  itsNumDirs(&OPT_NumDirections, this) // see Channels/ChannelOpts.{H,C}
{
  GVX_TRACE(__PRETTY_FUNCTION__);

  // create a bunch of subchannels:
  buildSubChans();
  //this->addSubChan(makeSharedComp(new IntensityChannel(mgr, "depi", "DepthIntensity")));
  //this->addSubChan(makeSharedComp(new FlickerChannel(mgr, "depf", "DepthFlicker")));
  //this->addSubChan(makeSharedComp(new OrientationChannel(mgr, "depo", "DepthOrientation", "depth")));
}

// ######################################################################
DepthMotionChannel::~DepthMotionChannel()
{
  GVX_TRACE(__PRETTY_FUNCTION__);
}

// ######################################################################
DirectionChannel& DepthMotionChannel::dirChan(const uint idx) const
{
  GVX_TRACE(__PRETTY_FUNCTION__);
  return *(dynCast<DirectionChannel>(subChan(idx)));
}

// ######################################################################

void DepthMotionChannel::buildSubChans()
{
  GVX_TRACE(__PRETTY_FUNCTION__);

  // kill any subchans we may have had...
  this->removeAllSubChans();

  // let's instantiate our subchannels now that we know how many we
  // want. They will inherit the current values (typically
  // post-command-line parsing) of all their options as they are
  // constructed:
  LINFO("Using %d directions spanning [0..360]deg", itsNumDirs.getVal());
  for (uint i = 0; i < itsNumDirs.getVal(); ++i)
    {
      nub::ref<DirectionChannel> chan =
        makeSharedComp
        (new DirectionChannel(getManager(), i,
                              360.0 * double(i) /
                              double(itsNumDirs.getVal()),
                              itsPyrType.getVal()));
      this->addSubChan(chan);

      chan->exportOptions(MC_RECURSE);
    }
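
  // For illustration: each DirectionChannel i constructed above is tuned to
  // the preferred direction 360*i/N degrees, so the directions evenly span
  // the full circle; e.g., itsNumDirs == 4 yields subchannels preferring
  // 0, 90, 180 and 270 degrees.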
}

// ######################################################################
void DepthMotionChannel::paramChanged(ModelParamBase* const param,
                                      const bool valueChanged,
                                      ParamClient::ChangeStatus* status)
{
  GVX_TRACE(__PRETTY_FUNCTION__);
  ComplexChannel::paramChanged(param, valueChanged, status);

  // if the param is our number of directions and it has become
  // different from our number of channels, let's reconfigure:
  if (param == &itsNumDirs &&
      numChans() != itsNumDirs.getVal())
    buildSubChans();
}
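
// Note: the number of DirectionChannel subchannels thus tracks the
// OPT_NumDirections model option at run time; whenever its value stops
// matching the current channel count, the subchannels are torn down and
// rebuilt by buildSubChans().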

// ######################################################################
void DepthMotionChannel::doInput(const InputFrame& origframe)
{
  GVX_TRACE(__PRETTY_FUNCTION__);

  /* Look for a special depth image from the retina. It is held in the
     retina alongside the standard image in certain special cases, such
     as when using an Open Scene Graph scene or a Kinect FrameGrabber,
     and is passed along as part of the InputFrame. In the future this
     might be moved to a new region (LGN?) since it could also be used
     by a stereo channel. We need it here so that we can pass a new
     depth-based InputFrame to our subchannels. */
  if (origframe.hasDepthImage()) {
    const Image<uint16>  idi = origframe.getDepthImage();
    const Image<byte>    cm  = origframe.clipMask();

    // convert depth to float and normalize it:
    Image<float> df = idi;
    df *= 0.125F; // NOTE: assumes a 12-bit depth image
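    // (a 12-bit sensor yields raw values in [0..4095], which this scaling
    // maps to roughly [0..512); the factor would need adjusting for a
    // sensor with a different bit depth)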

    InputFrame depthframe = InputFrame::fromGrayFloat(&df, origframe.time(), &cm, InputFrame::emptyCache);
/*
    rutz::mutex_lock_class lock;
    if (depthframe.pyrCache().get() != 0 && depthframe.pyrCache()->gaussian5.beginSet(depthframe.grayFloat(), &lock)) {
      LINFO("Computing depth pyramid");
      depthframe.pyrCache()->
        gaussian5.endSet(depthframe.grayFloat(),
                         buildPyrGaussian(depthframe.grayFloat(), 0, itsLevelSpec.getVal().maxDepth(), 5), &lock);
    } else {
      LINFO("Problem with depth pyramid");
      depthframe = origframe;
    }

    // send input to all our subchans:
    for (uint i = 0; i < numChans(); ++i) subChan(i)->input(depthframe);
    LINFO("Depth channel ok.");
*/
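    /* Note on the disabled block above: if re-enabled, it would pre-fill the
       depth frame's shared gaussian5 pyramid cache (so the subchannels could
       reuse a single Gaussian pyramid instead of each rebuilding it) before
       feeding the subchannels, and would fall back to the original frame if
       no cache is available. */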

    ASSERT(depthframe.grayFloat().initialized());

    // compute Reichardt motion detection in several directions:
    for (uint dir = 0; dir < numChans(); ++dir)
      {
        subChan(dir)->input(depthframe);
        LINFO("Motion pyramid (%d/%d) ok.", dir+1, numChans());
      }
  } else LINFO("No depth image from retina -- IGNORING");
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */