/*!@file Learn/ART1.H Adaptive Resonance Theory (ART1) network */

// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005   //
// by the University of Southern California (USC) and the iLab at USC. //
// See http://iLab.usc.edu for information about this project.         //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Lior Elazary <elazary@usc.edu>
// $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Learn/ART1.H $
// $Id: ART1.H 12962 2010-03-06 02:13:53Z irock $
//
// Inspired by http://www.neural-networks-at-your-fingertips.com/art1.html by Karsten Kutza

#ifndef LEARN_ART1_H_DEFINED
#define LEARN_ART1_H_DEFINED

#include "Util/Types.H" // for uint
#include <vector>
#include <string>

//! An ART1 (Adaptive Resonance Theory 1) network for clustering binary patterns
/*! See the usage sketch after the class declaration. */
class ART1
{
public:
  struct Unit
  {
    bool output;
    std::vector<double> weights;
    bool inhibited; //inhibition status of the unit (used during the F2 search)
  };

  struct Layer
  {
    std::vector<Unit> units;
  };

  //! Initialize an ART1 network with the given input size and number of classes
  ART1(const int inputSize, const int numClasses);

  ~ART1();

  //! Evolve the network on the given input pattern and return the class id
  int evolveNet(std::string in);

  //! Propagate F1 activity to F2 and return the index of the winning F2 unit
  int propagateToF2();

  //! Propagate the winner's top-down expectation back to the F1 layer
  void propagateToF1(const std::vector<bool> input, const int winner);

  //! Adjust the weights of the winning F2 unit (learning step)
  void adjustWeights(const int winner);

  //! Set the input pattern on the F1 layer
  void setInput(const std::vector<bool> input);

private:
  int itsInputSize;
  int itsNumClasses;
  Layer itsF1;   //input (comparison) layer
  Layer itsF2;   //output (recognition) layer
  double itsA1;  //A parameter for the first layer
  double itsB1;  //B parameter for the first layer
  double itsC1;  //C parameter for the first layer
  double itsD1;  //D parameter for the first layer
  double itsL;   //L parameter for the network
  double itsRho; //vigilance parameter
};

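// ######################################################################
// Example usage: a minimal sketch, not part of the original toolkit code.
// It assumes that evolveNet() takes a string of '0'/'1' characters whose
// length equals inputSize, which the network treats as the binary pattern
// handed to setInput():
//
//   ART1 net(5, 10);                  // 5-bit input patterns, at most 10 classes
//   int c1 = net.evolveNet("10110");  // the first pattern starts a new class
//   int c2 = net.evolveNet("10111");  // a similar pattern may resonate with the
//                                     // same class or start a new one, depending
//                                     // on the vigilance parameter itsRho
//
// Higher vigilance (itsRho close to 1) yields many narrowly tuned classes;
// lower vigilance groups more dissimilar patterns into the same class.
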
// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */

#endif