NeuroSimEvents.H

00001 /*!@file Neuro/NeuroSimEvents.H SimEvent derivatives for neuro modules */
00002 
00003 // //////////////////////////////////////////////////////////////////// //
00004 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
00005 // by the University of Southern California (USC) and the iLab at USC.  //
00006 // See http://iLab.usc.edu for information about this project.          //
00007 // //////////////////////////////////////////////////////////////////// //
00008 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
00009 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
00010 // in Visual Environments, and Applications'' by Christof Koch and      //
00011 // Laurent Itti, California Institute of Technology, 2001 (patent       //
00012 // pending; application number 09/912,225 filed July 23, 2001; see      //
00013 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
00014 // //////////////////////////////////////////////////////////////////// //
00015 // This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
00016 //                                                                      //
00017 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
00018 // redistribute it and/or modify it under the terms of the GNU General  //
00019 // Public License as published by the Free Software Foundation; either  //
00020 // version 2 of the License, or (at your option) any later version.     //
00021 //                                                                      //
00022 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
00023 // that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
00024 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
00025 // PURPOSE.  See the GNU General Public License for more details.       //
00026 //                                                                      //
00027 // You should have received a copy of the GNU General Public License    //
00028 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
00029 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
00030 // Boston, MA 02111-1307 USA.                                           //
00031 // //////////////////////////////////////////////////////////////////// //
00032 //
00033 // Primary maintainer for this file: Laurent Itti <itti@usc.edu>
00034 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Neuro/NeuroSimEvents.H $
00035 // $Id: NeuroSimEvents.H 14762 2011-05-03 01:13:16Z siagian $
00036 //
00037 
00038 #ifndef NEURO_NEUROSIMEVENTS_H_DEFINED
00039 #define NEURO_NEUROSIMEVENTS_H_DEFINED
00040 
00041 #include "Channels/InputFrame.H"
00042 #include "Neuro/WTAwinner.H"
00043 #include "Neuro/SaccadeBodyPart.H"
00044 #include "Neuro/SaccadeController.H"
00045 #include "Psycho/EyeTrace.H"
00046 #include "Simulation/SimEvent.H"
00047 #include "Simulation/SimReq.H"
00048 #include "Util/TransientStatus.H"
00049 #include "Channels/ChannelVisitor.H"
00050 
00051 class SpaceVariantTransform;
00052 class EyeData;
00053 class HandData;
00054 class ParamMap;
00055 class RawVisualCortex;
00056 class IntegerRawVisualCortex;
00057 class EnvVisualCortexFloat;
00058 class ChannelMaps;
00059 class VisualCortexEyeMvt;
00060 
00061 // ######################################################################
00062 //! The WinnerTakeAll selected a winner
00063 class SimEventWTAwinner : public SimEvent {
00064 public:
00065   //! Constructor
00066   SimEventWTAwinner(SimModule* src, const WTAwinner w, const uint shiftnum);
00067 
00068   //! Destructor
00069   virtual ~SimEventWTAwinner();
00070 
00071   //! Get a description for printing out
00072   /*! Shows regular SimEvent info, plus winner coordinates. */
00073   virtual std::string toString() const;
00074 
00075   //! Get the winner
00076   const WTAwinner& winner() const;
00077 
00078   //! Get the attention shift number (0-based)
00079   uint shiftNum() const;
00080 
00081 private:
00082   const WTAwinner itsWinner;
00083   const uint itsShiftNum;
00084 };
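
/*! A minimal usage sketch (not part of the original header): catching this
    event from within a module's evolve() method. It assumes the
    SimEventQueue check<>() interface and SeC<> handle typedef of the
    Simulation framework, a hypothetical SimModule subclass MyModule, and
    the WTAwinner fields p (Point2D<int>) and sv (salience value); those
    names may differ in the actual toolkit code.
    \code
    void MyModule::evolve(SimEventQueue& q)
    {
      if (SeC<SimEventWTAwinner> e = q.check<SimEventWTAwinner>(this))
        {
          const WTAwinner& win = e->winner();
          LINFO("Attention shift #%u at (%d, %d), salience %f",
                e->shiftNum(), win.p.i, win.p.j, win.sv);
        }
    }
    \endcode */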
00085 
00086 // ######################################################################
00087 //! The TargetChecker hit one or more targets
00088 class SimEventTargetsHit : public SimEvent {
00089 public:
00090   //! Constructor
00091   SimEventTargetsHit(SimModule* src, const int numhit);
00092 
00093   //! Destructor
00094   virtual ~SimEventTargetsHit();
00095 
00096   //! Get a description for printing out
00097   /*! Shows regular SimEvent info, plus number of targets hit. */
00098   virtual std::string toString() const;
00099 
00100   //! Get the number of targets hit this time
00101   int numHits() const;
00102 
00103 private:
00104   const int itsNumHits;
00105 };
00106 
00107 // ######################################################################
00108 //! A new image is available from the retina
00109 class SimEventRetinaImage : public SimEvent {
00110 public:
00111   //! Constructor
00112   SimEventRetinaImage(SimModule* src, const InputFrame& ifr,
00113                       const Rectangle& rawinprect,
00114                       const Point2D<int> offset);
00115 
00116   //! Constructor with transform
00117   SimEventRetinaImage(SimModule* src, const InputFrame& ifr,
00118                       const Rectangle& rawinprect,
00119                       const Point2D<int> offset, 
00120                       rutz::shared_ptr<SpaceVariantTransform> retinal_transform,
00121                       rutz::shared_ptr<SpaceVariantTransform> map_transform);
00122 
00123   //! Destructor
00124   virtual ~SimEventRetinaImage();
00125 
00126   //! Get a description for printing out
00127   virtual std::string toString() const;
00128 
00129   //! Get the frame
00130   const InputFrame& frame() const;
00131 
00132   //! Get the raw input rectangle
00133   const Rectangle& rawInputRectangle() const;
00134 
00135   //! Translate a raw eye position from stimulus coordinates to retinal coordinates
00136   virtual Point2D<int> rawToRetinal(const Point2D<int>& rawpos) const;
00137 
00138   //! Translate a retinal eye position back into raw stimulus coordinates
00139   virtual Point2D<int> retinalToRaw(const Point2D<int>& retpos) const;
00140 
00141   //! get the raw input dims
00142   virtual const Dims& getRawInputDims() const;
00143 
00144   //! Get the coordinates of the center of the image
00145   virtual Point2D<int> center() const;
00146 
00147   //! Get the retinal/raw offset
00148   /*! Use with caution! Prefer using rawToRetinal() and retinalToRaw() instead. */
00149   const Point2D<int>& offset() const;
00150 
00151   //! get retinal transform
00152   rutz::shared_ptr<SpaceVariantTransform> getRetTransform() const;
00153 
00154   //! get map transform
00155   rutz::shared_ptr<SpaceVariantTransform> getMapTransform() const;
00156 
00157 private:
00158   const InputFrame      itsFrame;
00159   const Rectangle       itsRawInputRectangle;
00160   const Point2D<int>    itsOffset;
00161   rutz::shared_ptr<SpaceVariantTransform> itsRetTransform;
00162   rutz::shared_ptr<SpaceVariantTransform> itsMapTransform;
00163 };
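
/*! A minimal sketch (same assumptions as above: SeC<>/check<>() from the
    Simulation framework, hypothetical caller) of the raw/retinal coordinate
    translation this event provides, e.g. to map a retinal location back to
    the original stimulus frame.
    \code
    if (SeC<SimEventRetinaImage> e = q.check<SimEventRetinaImage>(this))
      {
        const Point2D<int> retPt(10, 20);                  // some retinal location
        const Point2D<int> rawPt = e->retinalToRaw(retPt); // back to stimulus coords
        const Point2D<int> back  = e->rawToRetinal(rawPt); // round trip, ~= retPt
        const Dims rawDims       = e->getRawInputDims();   // raw input dimensions
      }
    \endcode */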
00164 
00165 // ######################################################################
00166 //! A new output is available from the VisualCortex
00167 class SimEventVisualCortexOutput : public SimEvent {
00168 public:
00169   //! Constructor
00170   SimEventVisualCortexOutput(SimModule* src, const Image<float>& vcout);
00171 
00172   //! Destructor
00173   virtual ~SimEventVisualCortexOutput();
00174 
00175   //! Get a description for printing out
00176   virtual std::string toString() const;
00177 
00178   //! Get the VisualCortex output
00179   /*! A factor of 1.0 will return the raw, unnormalized map. A factor
00180     of 0.0F will normalize the map to a range of [0..255]. Any other
00181     factor will be used to multiply the map values. */
00182   const Image<float> vco(const float factor = 1.0F) const;
00183 
00184 private:
00185   const Image<float> itsMap;
00186 };
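
/*! A minimal sketch of the factor argument to vco(), following the comment
    above; queue access via check<>() is assumed as in the other sketches in
    this file.
    \code
    if (SeC<SimEventVisualCortexOutput> e = q.check<SimEventVisualCortexOutput>(this))
      {
        const Image<float> raw  = e->vco();      // factor 1.0F: raw, unnormalized map
        const Image<float> disp = e->vco(0.0F);  // normalized to [0..255], e.g. for display
        const Image<float> dbl  = e->vco(2.0F);  // raw values multiplied by 2.0
      }
    \endcode */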
00187 
00188 // ######################################################################
00189 //! A new output is available from the SaliencyMap
00190 class SimEventSaliencyMapOutput : public SimEvent {
00191 public:
00192   //! Constructor
00193   SimEventSaliencyMapOutput(SimModule* src, const Image<float>& smout, const int maplevel);
00194 
00195   //! Destructor
00196   virtual ~SimEventSaliencyMapOutput();
00197 
00198   //! Get a description for printing out
00199   virtual std::string toString() const;
00200 
00201   //! Get the SaliencyMap output
00202   /*! A factor of 1.0 will return the raw, unnormalized map. A factor
00203     of 0.0F will normalize the map to a range of [0..255]. Any other
00204     factor will be used to multiply the map values. */
00205   const Image<float> sm(const float factor = 1.0F) const;
00206 
00207   //! Transform coordinates from saliency map to original image
00208   Point2D<int> smToOrig(const Point2D<int>& p) const;
00209 
00210   //! Transform coordinates from original image to saliency map
00211   Point2D<int> origToSm(const Point2D<int>& p) const;
00212 
00213 private:
00214   const Image<float> itsMap;
00215   const int itsMapLevel;
00216 };
00217 
00218 // // ######################################################################
00219 // //! A new output is available from the MT module
00220 // class SimEventMTfeatureMapOutput : public SimEvent {
00221 // public:
00222 //   //! Constructor
00223 //   SimEventMTfeatureMapOutput
00224 //   (SimModule* src, const std::vector<Image<float> > mtFeat);
00225 
00226 //   //! Destructor
00227 //   virtual ~SimEventMTfeatureMapOutput();
00228 
00229 //   //! Get a description for printing out
00230 //   virtual std::string toString() const;
00231 
00232 //   //! Get the MT feature Map output
00233 //   const std::vector<Image<float> > mtFeatures() const;
00234 
00235 // private:
00236 //   const std::vector<Image<float> > itsMTfeatures;
00237 // };
00238 
00239 // ######################################################################
00240 //! A new output is available from the GistEstimator
00241 class SimEventGistOutput : public SimEvent {
00242 public:
00243   //! Constructor
00244   SimEventGistOutput(SimModule* src, const Image<float>& gout);
00245 
00246   //! Destructor
00247   virtual ~SimEventGistOutput();
00248 
00249   //! Get a description for printing out
00250   virtual std::string toString() const;
00251 
00252   //! Get the gist feature vector
00253   const Image<float> gv() const;
00254 
00255 private:
00256   const Image<float> itsGistVector;
00257 };
00258 
00259 // #####################################################################
00260 //! A new output is available from the TaskRelevanceMap
00261 class SimEventTaskRelevanceMapOutput : public SimEvent {
00262 public:
00263   //! Constructor
00264   SimEventTaskRelevanceMapOutput(SimModule* src, const Image<float>& smout);
00265 
00266   //! Destructor
00267   virtual ~SimEventTaskRelevanceMapOutput();
00268 
00269   //! Get a description for printing out
00270   virtual std::string toString() const;
00271 
00272   //! Get the TaskRelevanceMap output
00273   /*! A factor of 1.0 will return the raw, unnormalized map. A factor
00274     of 0.0F will normalize the map to a range of [0..255]. Any other
00275     factor will be used to multiply the map values. */
00276   const Image<float> trm(const float factor = 1.0F) const;
00277 
00278 private:
00279   const Image<float> itsMap;
00280 };
00281 
00282 // ######################################################################
00283 //! One or more new outputs are available from the AttentionGuidanceMap
00284 class SimEventAttentionGuidanceMapOutput : public SimEvent {
00285 public:
00286   //! Constructor for a single map
00287   SimEventAttentionGuidanceMapOutput(SimModule* src, const Image<float>& agmout);
00288 
00289   //! Constructor for when we have more than one map
00290   SimEventAttentionGuidanceMapOutput(SimModule* src, const ImageSet<float>& agmout);
00291   
00292   //! Destructor
00293   virtual ~SimEventAttentionGuidanceMapOutput();
00294 
00295   //! Get a description for printing out
00296   virtual std::string toString() const;
00297 
00298   //! get the number of maps
00299   uint numMaps() const;
00300 
00301   //! Get the AttentionGuidanceMap output
00302   /*! A factor of 1.0 will return the raw, unnormalized map. A factor
00303     of 0.0F will normalize the map to a range of [0..255]. Any other
00304     factor will be used to multiply the map values. */
00305   const Image<float> agm(const float factor = 1.0F, const uint pos = 0) const;
00306 
00307   //! Get all AttentionGuidanceMap outputs
00308   /*! A factor of 1.0 will return the raw, unnormalized map. A factor
00309     of 0.0F will normalize the map to a range of [0..255]. Any other
00310     factor will be used to multiply the map values. */
00311   const ImageSet<float> allAgm(const float factor = 1.0F) const;
00312   
00313 private:
00314   ImageSet<float> itsMap;
00315 };
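
/*! A minimal sketch for the multi-map case: iterate over numMaps() and fetch
    each map with agm(factor, pos), or grab the whole set with allAgm(). The
    check<>() queue access is assumed as in the other sketches.
    \code
    if (SeC<SimEventAttentionGuidanceMapOutput> e =
          q.check<SimEventAttentionGuidanceMapOutput>(this))
      for (uint i = 0; i < e->numMaps(); ++i)
        {
          const Image<float> m = e->agm(0.0F, i); // i-th map, normalized to [0..255]
          // ... use m ...
        }
    \endcode */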
00316 
00317 // ######################################################################
00318 //! A new output is available from the AttentionGate
00319 class SimEventAttentionGateOutput : public SimEvent {
00320 public:
00321   //! Constructor
00322   SimEventAttentionGateOutput(SimModule* src,
00323                               const Image<float>& agmout,
00324                               const Image<float>& lamout,
00325                               const Image<float>& camout,
00326                               const unsigned int lamframe);
00327 
00328   //! Destructor
00329   virtual ~SimEventAttentionGateOutput();
00330 
00331   //! Get a description for printing out
00332   virtual std::string toString() const;
00333 
00334   //! Attention gate map
00335   const Image<float> ag(const float factor = 1.0F) const;
00336 
00337   //! What finally got through in the last attention map
00338   const Image<float> lam(const float factor = 1.0F) const;
00339 
00340   //! candidate for what will get through in the current attention map
00341   const Image<float> cam(const float factor = 1.0F) const;
00342 
00343   //! Get the frame number of our last attention gate
00344   const unsigned int lamFrame() const;
00345 private:
00346   const Image<float> itsMap;
00347   const Image<float> itsLastAttMap;
00348   const Image<float> itsCurrAttMap;
00349   const unsigned int itsLastFrame;
00350 };
00351 
00352 // ######################################################################
00353 struct SimEventAttentionGateStageTwoObjects
00354 {
00355   int                              n;
00356   Image<int>                       segments;
00357   Image<float>                     fdistance;
00358   std::vector<int>                 x;
00359   std::vector<int>                 y;
00360   std::vector<int>                 id;
00361   std::vector<float>               val;
00362   std::vector<std::vector<float> > features;
00363 };
00364 
00365 class SimEventAttentionGateStageTwoSegments : public SimEvent {
00366 public:
00367   //! Constructor
00368   SimEventAttentionGateStageTwoSegments(SimModule* src,
00369                                const Image<bool>& candidates,
00370                                const SimEventAttentionGateStageTwoObjects& obj,
00371                                const int          segnum);
00372 
00373   //! Destructor
00374   ~SimEventAttentionGateStageTwoSegments();
00375 
00376   //! Which attention gate regions are open from stage one
00377   const Image<bool> candidates() const;
00378 
00379   //! What are the attention gate regions
00380   const SimEventAttentionGateStageTwoObjects obj() const;
00381 
00382   //! how many segments were found
00383   const int segnum() const;
00384 
00385 private:
00386   const Image<bool>                          itsCandidates;
00387   const SimEventAttentionGateStageTwoObjects itsObjects;
00388   const int                                  itsSegmentNum;
00389 };
00390 
00391 // ######################################################################
00392 //! A new output is available from the ShapeEstimator
00393 class SimEventShapeEstimatorOutput : public SimEvent {
00394 public:
00395   //! Constructor
00396   SimEventShapeEstimatorOutput(SimModule* src,
00397                                const Image<float>& winmap,
00398                                const Image<byte>& objmask,
00399                                const Image<byte>& iormask,
00400                                const Image<float>& smoothmask,
00401                                const Image<float>& cumsmoothmask,
00402                                const std::string& winlabel,
00403                                const bool isshaped);
00404 
00405   //! Destructor
00406   virtual ~SimEventShapeEstimatorOutput();
00407 
00408   //! Get a description for printing out
00409   virtual std::string toString() const;
00410 
00411   //! Get the map where the winner was found
00412   const Image<float>& winningMap() const;
00413 
00414   //! Get the object mask
00415   /*! The object mask is a binary mask {0,255} specifying the extent
00416     of the object (255 inside the object and 0 outside). Its dims are
00417     the dims of the map where the object was segmented (this depends
00418     on the shape estimator mode used). */
00419   const Image<byte>& objectMask() const;
00420 
00421   //! Get the IOR mask
00422   /*! The IOR mask is a byte map [0,255]: 0 everywhere outside the
00423    object and (winMapNormalized * 255) everywhere inside the object.
00424    This is used for IOR in a graded manner: the stronger the winning
00425    property is at a given position, the more strongly that position is
00426    inhibited. */
00427   const Image<byte>& iorMask() const;
00428 
00429   //! Get the smooth mask
00430   /*! The smooth mask is a float map [0.0,1.0] in input image
00431    coordinates created from the object mask by scaling it up and
00432    smoothing out the edges with some specified smoothing method. */
00433   const Image<float>& smoothMask() const;
00434 
00435   //! Get the cumulative smooth mask
00436   /*! The cumulative smooth mask is a merger of all smooth masks since
00437     the last reset(). */
00438   Image<float> cumSmoothMask() const;
00439 
00440   //! Get a negative of the cumulative smooth mask
00441   Image<float> negCumSmoothMask() const;
00442 
00443   //! Get description of the feature/conspic/saliency map used for masks
00444   const std::string& winningLabel() const;
00445 
00446   //! Get object area, in original input pixels
00447   uint objectArea() const;
00448 
00449   //! Did the shape extraction succeed?
00450   /*! If this returns true, then the ShapeEstimator was able to find a
00451     shape to extract. Otherwise it fell back to just using a disk at
00452     the attended location. The masks always contain something, just
00453     sometimes that thing may be a disk rather than a nicely segmented
00454     object. */
00455   bool isShaped() const;
00456 
00457 private:
00458   const Image<float> itsWinningMap;
00459   const Image<byte> itsObjMask;
00460   const Image<byte> itsIORmask;
00461   const Image<float> itsSmoothMask;
00462   const Image<float> itsCumSmoothMask;
00463   const std::string itsWinLabel;
00464   const bool itsIsShaped;
00465 };
00466 
00467 // ######################################################################
00468 //! A SaccadeController may post this at every evolve
00469 /*! Note that you cannot instantiate an object of this type, since it
00470   contains a pure virtual function. Use SimEventSaccadeStatusEye or
00471   SimEventSaccadeStatusHead instead. */
00472 class SimEventSaccadeStatus : public SimEvent {
00473 public:
00474   //! Constructor
00475   SimEventSaccadeStatus(SimModule* src, const Point2D<int>& pos,
00476                         const SaccadeState state,
00477                         const SaccadeState prevState,
00478                         const bool blinkState,
00479                         const bool prevBlinkState);
00480 
00481   //! Destructor
00482   virtual ~SimEventSaccadeStatus();
00483 
00484   //! Get a description for printing out
00485   virtual std::string toString() const;
00486 
00487   //! Get the position
00488   const Point2D<int>& position() const;
00489 
00490   //! Are we in/out/beginning/ending unknown/junk mode?
00491   TransientStatus unknownStatus() const;
00492 
00493   //! Are we in/out/beginning/ending fixation?
00494   TransientStatus fixationStatus() const;
00495 
00496   //! Are we in/out/beginning/ending saccade?
00497   TransientStatus saccadeStatus() const;
00498 
00499   //! Are we in/out/beginning/ending blink?
00500   TransientStatus blinkStatus() const;
00501 
00502   //! Are we in/out/beginning/ending smooth pursuit?
00503   TransientStatus smoothPursuitStatus() const;
00504 
00505   //! Get the body part, used by toString()
00506   virtual SaccadeBodyPart bodyPart() const = 0;
00507 
00508 private:
00509   const Point2D<int> itsPosition;
00510   const SaccadeState itsState;
00511   const SaccadeState itsPrevState;
00512   const bool itsBlinkState;
00513   const bool itsPrevBlinkState;
00514 };
00515 
00516 // ######################################################################
00517 //! An Eye SaccadeController may post this at every evolve
00518 class SimEventSaccadeStatusEye : public SimEventSaccadeStatus {
00519 public:
00520   //! Constructor
00521   SimEventSaccadeStatusEye(SimModule* src, const Point2D<int>& pos,
00522                            const SaccadeState state,
00523                            const SaccadeState prevState,
00524                            const bool blinkState,
00525                            const bool prevBlinkState);
00526 
00527   //! Destructor
00528   virtual ~SimEventSaccadeStatusEye();
00529 
00530   //! Get the body part
00531   SaccadeBodyPart bodyPart() const;
00532 };
00533 
00534 // ######################################################################
00535 //! A Head SaccadeController may post this at every evolve
00536 class SimEventSaccadeStatusHead : public SimEventSaccadeStatus {
00537 public:
00538   //! Constructor
00539   SimEventSaccadeStatusHead(SimModule* src, const Point2D<int>& pos,
00540                             const SaccadeState state,
00541                             const SaccadeState prevState,
00542                             const bool blinkState,
00543                             const bool prevBlinkState);
00544 
00545   //! Destructor
00546   virtual ~SimEventSaccadeStatusHead();
00547 
00548   //! Get the body part
00549   SaccadeBodyPart bodyPart() const;
00550 };
00551 
00552 // ######################################################################
00553 //! An EyeTrackerSaccadeController may post this
00554 class SimEventEyeTrackerData : public SimEvent {
00555 public:
00556   //! Constructor
00557   SimEventEyeTrackerData(SimModule* src, rutz::shared_ptr<EyeData> d,
00558                          const uint trackernum,
00559                          const std::string& trackerfname,
00560                          const PixRGB<byte>& trackercolor, 
00561                          const PixPerDeg& ppd, const SimTime samplerate);
00562 
00563   //! Destructor
00564   virtual ~SimEventEyeTrackerData();
00565 
00566   //! Get the eye data
00567   rutz::shared_ptr<EyeData> data() const;
00568 
00569   //! Get the tracker number
00570   uint trackerNum() const;
00571 
00572   //! Get the tracker filename
00573   std::string trackerFilename() const;
00574 
00575   //! Get the tracker color
00576   PixRGB<byte> trackerColor() const;
00577 
00578   //! Get the current pixels per degree
00579   PixPerDeg trackerPpd() const;
00580 
00581   //! Get the sampling rate
00582   SimTime trackerHz() const;
00583 
00584 private:
00585   rutz::shared_ptr<EyeData> itsData;
00586   const uint itsTrackerNum;
00587   const std::string itsTrackerFname;
00588   const PixRGB<byte> itsTrackerColor;
00589   const PixPerDeg itsPpd;
00590   const SimTime itsHz;
00591 };
00592 
00593 
00594 // ######################################################################
00595 //! A TrackerHandController may post this
00596 class SimEventHandTrackerData : public SimEvent {
00597 public:
00598   //! Constructor
00599   SimEventHandTrackerData(SimModule* src, rutz::shared_ptr<HandData> d,
00600                           const uint trackernum,
00601                           const std::string& trackerfname,
00602                           const PixRGB<byte>& trackercolor, 
00603                           const SimTime samplerate);
00604   
00605   //! Destructor
00606   virtual ~SimEventHandTrackerData();
00607 
00608   //! Get the hand data
00609   rutz::shared_ptr<HandData> data() const;
00610 
00611   //! Get the tracker number
00612   uint trackerNum() const;
00613 
00614   //! Get the tracker filename
00615   std::string trackerFilename() const;
00616 
00617   //! Get the tracker color
00618   PixRGB<byte> trackerColor() const;
00619 
00620   //! Get the sampling rate
00621   SimTime trackerHz() const;
00622 
00623 private:
00624   rutz::shared_ptr<HandData> itsData;
00625   const uint itsTrackerNum;
00626   const std::string itsTrackerFname;
00627   const PixRGB<byte> itsTrackerColor;
00628   const SimTime itsHz;
00629 };
00630 
00631 
00632 // ######################################################################
00633 //! Trigger a ChannelVisitor on VisualCortex
00634 /*! This request is checked for and caught by VisualCortex. Upon
00635     receipt, VisualCortex will first call preProcessing(), then accept
00636     the visitor, and finally call postProcessing(). */
00637 class SimReqVCXchanVis : public SimReq {
00638 public:
00639   //! Construct from a ChannelVisitor
00640   SimReqVCXchanVis(SimModule* src, rutz::shared_ptr<ChannelVisitor> vis);
00641 
00642   //! Destructor
00643   virtual ~SimReqVCXchanVis();
00644 
00645   //! Run some pre-processing before we accept()
00646   /*! On the base class, this is a no-op. */
00647   virtual void preProcessing(RawVisualCortex *vcx);
00648 
00649   //! Get our visitor
00650   rutz::shared_ptr<ChannelVisitor> visitor() const;
00651 
00652   //! Run some post-processing after we accept()
00653   /*! On the base class, this is a no-op. */
00654   virtual void postProcessing(RawVisualCortex *vcx);
00655 
00656 private:
00657   rutz::shared_ptr<ChannelVisitor> itsVisitor;
00658 };
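
/*! A minimal sketch of issuing this request. MyVisitor stands for a
    hypothetical ChannelVisitor subclass, and the SimEventQueue request()
    call is assumed from the Simulation framework; the VisualCortex that
    catches the request runs preProcessing(), accepts the visitor, then runs
    postProcessing(), as described above.
    \code
    rutz::shared_ptr<ChannelVisitor> vis(new MyVisitor(/* ... */));
    rutz::shared_ptr<SimReqVCXchanVis> req(new SimReqVCXchanVis(this, vis));
    q.request(req); // caught by VisualCortex, which applies the visitor
    \endcode */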
00659 
00660 // ######################################################################
00661 //! Indicate which object we are biasing for (use for statistics)
00662 class SimEventObjectToBias : public SimEvent {
00663 public:
00664   SimEventObjectToBias(SimModule* src, const std::string& objName);
00665 
00666   virtual ~SimEventObjectToBias();
00667 
00668   const std::string& name() const;
00669 
00670 private:
00671   const std::string itsObjName;
00672 };
00673 
00674 // ######################################################################
00675 //! A new target mask is available; the TargetChecker will check for this and use the new mask
00676 class SimEventTargetMask : public SimEvent {
00677 public:
00678   //! Constructor
00679   SimEventTargetMask(SimModule* src, const Image<byte>& tmask);
00680 
00681   //! Destructor
00682   virtual ~SimEventTargetMask();
00683 
00684   //! Get a description for printing out
00685   virtual std::string toString() const;
00686 
00687   //! Get the target mask
00688   const Image<byte> mask() const;
00689 
00690 private:
00691   const Image<byte> itsMask;
00692 };
00693 
00694 // ######################################################################
00695 //! Request VCX features at a given location
00696 /*! PROGRAMMER NOTE: This is an example of a read/write SimReq,
00697     whereby the VCX directly dumps its features into the request and
00698     posts nothing in return. The object that did a request() of this
00699     event can then directly use the results in the event as soon as
00700     request() returns. If features() is empty after the event has been
00701     requested, then it means that no VisualCortex caught it and there are
00702     no features available. */
00703 class SimReqVCXfeatures : public SimReq {
00704 public:
00705   //! Constructor
00706   SimReqVCXfeatures(SimModule* src, const Point2D<int>& p);
00707 
00708   //! Destructor
00709   virtual ~SimReqVCXfeatures();
00710 
00711   //! Get a description for printing out
00712   virtual std::string toString() const;
00713 
00714   //! Get access to the location of the features
00715   const Point2D<int>& loc() const;
00716 
00717   //! Get read/write access to the features
00718   std::vector<float>& features();
00719 
00720 private:
00721   const Point2D<int> itsLoc;
00722   std::vector<float> itsFeatures;
00723 };
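
/*! A minimal sketch of the read/write pattern described in the note above:
    request the features at a location, then read them back from the same
    object. The SimEventQueue request() call is assumed from the Simulation
    framework.
    \code
    rutz::shared_ptr<SimReqVCXfeatures> req(new SimReqVCXfeatures(this, Point2D<int>(100, 75)));
    q.request(req); // a VisualCortex, if present, fills req->features() in place
    if (req->features().empty())
      LINFO("No VisualCortex caught the request; no features available");
    else
      LINFO("Got %u feature values at (100, 75)", (unsigned int) req->features().size());
    \endcode */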
00724 
00725 // ######################################################################
00726 //! Request VCX internal maps
00727 /*! PROGRAMMER NOTE: This is an example of a read/write SimReq,
00728     whereby the VCX directly dumps its internal maps into the request and
00729     posts nothing in return. The object that did a request() of this
00730     event can then directly use the results in the event as soon as
00731     request() returns. While this object is quite benign as long as
00732     everything runs on the same machine, beware that it is potentially
00733     quite costly to transport from one machine to another via
00734     network. */
00735 class SimReqVCXmaps : public SimReq {
00736 public:
00737   //! Constructor
00738   SimReqVCXmaps(SimModule* src);
00739 
00740   //! Destructor
00741   virtual ~SimReqVCXmaps();
00742 
00743   //! Get the ChannelMaps object that will contain all the VCX maps
00744   rutz::shared_ptr<ChannelMaps> channelmaps() const;
00745 
00746 private:
00747   rutz::shared_ptr<ChannelMaps> itsChannelMaps;
00748 
00749   friend class VisualCortexStd;
00750   friend class VisualCortexInt;
00751   friend class VisualCortexEnv;
00752   friend class VisualCortexEyeMvt;
00753   void populateChannelMaps(RawVisualCortex *vcx);
00754   void populateChannelMaps(IntegerRawVisualCortex *vcx);
00755   void populateChannelMaps(EnvVisualCortexFloat *vcx);
00756   void populateChannelMaps(VisualCortexEyeMvt *vcx);
00757 };
00758 
00759 // ######################################################################
00760 //! A new output is available from the VisualBuffer
00761 class SimEventVisualBufferOutput : public SimEvent {
00762 public:
00763   //! Constructor
00764   SimEventVisualBufferOutput(SimModule* src, const Image<float>& buf,
00765                              const int smlev, const Dims& smdims, const Point2D<int>& retoff);
00766 
00767   //! Destructor
00768   virtual ~SimEventVisualBufferOutput();
00769 
00770   //! Get a description for printing out
00771   virtual std::string toString() const;
00772 
00773   //! Get the buffer
00774   const Image<float>& buffer() const;
00775 
00776   //! transform coord from retinotopic/retinal-scale to world-centered/sm-scale
00777   Point2D<int> retinalToBuffer(const Point2D<int>& p) const;
00778 
00779   //! transform coord from world-centered/sm-scale to retinotopic/retinal-scale
00780   Point2D<int> bufferToRetinal(const Point2D<int>& p) const;
00781 
00782   //! SM dims
00783   const Dims& smdims() const;
00784 
00785   //! SM level
00786   int smlev() const;
00787 
00788 private:
00789   const Image<float> itsVisualBuffer;
00790   const int itsSMlev;
00791   const Dims itsSMdims;
00792   const Point2D<int> itsRetinaOffset;
00793 };
00794 
00795 // ######################################################################
00796 /* So things look consistent in everyone's emacs... */
00797 /* Local Variables: */
00798 /* mode: c++ */
00799 /* indent-tabs-mode: nil */
00800 /* End: */
00801 
00802 #endif // NEURO_NEUROSIMEVENTS_H_DEFINED