/*!@file Channels/DepthChannel.C A depth channel. */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC.  //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Laurent Itti
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Channels/DepthChannel.C $
// $Id: DepthChannel.C 14293 2010-12-02 01:57:25Z itti $

#include "Channels/DepthChannel.H"

#include "Channels/OrientationChannel.H"
#include "Channels/FlickerChannel.H"
#include "Channels/IntensityChannel.H"
#include "Channels/ChannelOpts.H"
#include "Component/OptionManager.H"
#include "Image/ColorOps.H"
#include "Image/PyramidOps.H" // for buildPyrGaussian()
#include "rutz/mutex.h"
#include "rutz/trace.h"

// ######################################################################
// Construct the depth channel and register its subchannels.
//
// The depth map is currently processed only through an IntensityChannel
// subchannel ("depi"); the FlickerChannel and OrientationChannel
// subchannels are present in the code but disabled (commented out).
// itsLevelSpec is wired to OPT_LevelSpec so the command line controls
// the pyramid depth used in doInput() below.
DepthChannel::DepthChannel(OptionManager& mgr) :
  ComplexChannel(mgr, "Depth", "Depth", DEPTH),
  itsLevelSpec(&OPT_LevelSpec, this)
{
GVX_TRACE(__PRETTY_FUNCTION__);

  // create a bunch of subchannels:
  this->addSubChan(makeSharedComp(new IntensityChannel(mgr, "depi", "DepthIntensity")));
  //this->addSubChan(makeSharedComp(new FlickerChannel(mgr, "depf", "DepthFlicker")));
  //this->addSubChan(makeSharedComp(new OrientationChannel(mgr, "depo", "DepthOrientation", "depth")));
}

// ######################################################################
// Destructor: nothing to release explicitly; subchannels are owned and
// torn down by the ComplexChannel / shared-component machinery.
DepthChannel::~DepthChannel()
{
GVX_TRACE(__PRETTY_FUNCTION__);
}

// ######################################################################
// Feed the input frame's depth image (if any) to all subchannels.
//
// @param origframe the incoming frame; only its depth image, clip mask
//        and timestamp are used here. Frames without a depth image are
//        ignored (with a log message) and no subchannel input occurs.
void DepthChannel::doInput(const InputFrame& origframe)
{
GVX_TRACE(__PRETTY_FUNCTION__);

  /* Look for a special depth image from the retina. This is held in
     the retina along with the standard image in certain special
     instances such as when using an Open Scene Graph scene, or a
     Kinect FrameGrabber and is passed along as part of the
     InputFrame. In the future this might be placed as a new region
     (LGN?) since it can be cross used with a stereo channel. We need
     it here so we can switch pass a new depth-based InputFrame to our
     subchannels. */
  if (origframe.hasDepthImage()) {
    const Image<uint16> idi = origframe.getDepthImage();
    const Image<byte> cm = origframe.clipMask();

    // convert depth to float and normalize it:
    Image<float> df = idi;
    df *= 0.125F; // NOTE: Assumes 12-bit depth image
    // NOTE(review): 0.125F maps a 12-bit range [0..4095] onto
    // [0..~512]; if downstream code expects the usual [0..255] float
    // range, the factor would be 0.0625F. Kinect depth is also often
    // 11-bit rather than 12-bit. Confirm the intended output range.

    // Wrap the normalized depth map as a grayscale InputFrame carrying
    // the original timestamp and clip mask:
    InputFrame depthframe = InputFrame::fromGrayFloat(&df, origframe.time(), &cm, InputFrame::emptyCache);

    // Pre-compute the Gaussian pyramid for the depth image so that the
    // subchannels can share it via the frame's pyramid cache. beginSet()
    // acquires the cache slot under 'lock'; endSet() publishes the
    // pyramid and releases it.
    // NOTE(review): depthframe was just built with
    // InputFrame::emptyCache; if that leaves pyrCache() null, the first
    // test below always fails and the else branch silently substitutes
    // the ORIGINAL (non-depth) frame for the subchannel input -- confirm
    // this fallback is intentional.
    rutz::mutex_lock_class lock;
    if (depthframe.pyrCache().get() != 0 && depthframe.pyrCache()->gaussian5.beginSet(depthframe.grayFloat(), &lock)) {
      LINFO("Computing depth pyramid");
      depthframe.pyrCache()->
        gaussian5.endSet(depthframe.grayFloat(),
                         buildPyrGaussian(depthframe.grayFloat(), 0, itsLevelSpec.getVal().maxDepth(), 5), &lock);
    } else {
      LINFO("Problem with depth pyramid");
      depthframe = origframe;
    }

    // send input to all our subchans:
    for (uint i = 0; i < numChans(); ++i) subChan(i)->input(depthframe);
    LINFO("Depth channel ok.");

  } else LINFO("No depth image from retina -- IGNORING");
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */