/*!@file BayesFilters/UKF.C Unscented Kalman Filter */
// //////////////////////////////////////////////////////////////////// //
// The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2001 by the //
// University of Southern California (USC) and the iLab at USC.         //
// See http://iLab.usc.edu for information about this project.          //
// //////////////////////////////////////////////////////////////////// //
// Major portions of the iLab Neuromorphic Vision Toolkit are protected //
// under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
// in Visual Environments, and Applications'' by Christof Koch and      //
// Laurent Itti, California Institute of Technology, 2001 (patent       //
// pending; application number 09/912,225 filed July 23, 2001; see      //
// http://pair.uspto.gov/cgi-bin/final/home.pl for current status).     //
// //////////////////////////////////////////////////////////////////// //
// This file is part of the iLab Neuromorphic Vision C++ Toolkit.       //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is free software; you can   //
// redistribute it and/or modify it under the terms of the GNU General  //
// Public License as published by the Free Software Foundation; either  //
// version 2 of the License, or (at your option) any later version.     //
//                                                                      //
// The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope  //
// that it will be useful, but WITHOUT ANY WARRANTY; without even the   //
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR      //
// PURPOSE.  See the GNU General Public License for more details.       //
//                                                                      //
// You should have received a copy of the GNU General Public License    //
// along with the iLab Neuromorphic Vision C++ Toolkit; if not, write   //
// to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,   //
// Boston, MA 02111-1307 USA.                                           //
// //////////////////////////////////////////////////////////////////// //
//
// Primary maintainer for this file: Lior Elazary
// $HeadURL: $
// $Id: $

#ifndef UKF_C_DEFINED
#define UKF_C_DEFINED

#include "BayesFilters/UKF.H"
#include "Util/MathFunctions.H"

// ######################################################################

UKF::UKF(int numStates, int numObservations,
         double k, double alpha, double beta) :
  itsNumStates(numStates),
  itsNumObservations(numObservations),
  itsAlpha(alpha),
  itsK(k),
  itsBeta(beta),
  itsUnstable(false)
{
  itsR = Image<double>(itsNumStates, itsNumStates, ZEROS);
  itsQ = Image<double>(itsNumObservations, itsNumObservations, ZEROS);

  itsLambda = itsAlpha*itsAlpha*(itsNumStates+itsK) - itsNumStates;
  itsGamma  = sqrt(itsNumStates + itsLambda);

  //Calculate the weights for the mean and covariance ahead of time
  itsMuWeight    = Image<double>(1+2*itsNumStates, 1, ZEROS);
  itsSigmaWeight = Image<double>(1+2*itsNumStates, 1, ZEROS);

  //Initial weight for the mean location
  itsMuWeight[0]    = itsLambda/(itsNumStates + itsLambda);
  itsSigmaWeight[0] = (itsLambda/(itsNumStates + itsLambda)) +
    (1 - (itsAlpha*itsAlpha) + itsBeta);

  //All remaining sigma points share the same weight
  double weight = 1/(2*(itsNumStates+itsLambda));
  for(int i=1; i<1+itsNumStates*2; i++)
  {
    itsMuWeight[i]    = weight;
    itsSigmaWeight[i] = weight;
  }

  //Initial state
  itsState = Image<double>(1, itsNumStates, ZEROS);
  itsSigma = Image<double>(itsNumStates, itsNumStates, ZEROS);

  //Normalizer for the Gaussian likelihood computed in getLikelihood(); the
  //density is over the observation space, so use the observation dimension
  itsGaussNormalizer = 1.0/sqrt(pow(2.0*M_PI, itsNumObservations));
}
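
// Note on the weights computed in the constructor above: these are the
// standard scaled unscented transform weights. With n = itsNumStates, the
// 2n+1 sigma points are weighted as
//
//   lambda = alpha^2 * (n + k) - n,         gamma = sqrt(n + lambda)
//   Wm[0]  = lambda / (n + lambda)
//   Wc[0]  = lambda / (n + lambda) + (1 - alpha^2 + beta)
//   Wm[i]  = Wc[i] = 1 / (2*(n + lambda)),  i = 1..2n
//
// where Wm (itsMuWeight) weights the means and Wc (itsSigmaWeight) weights
// the covariances in the predict steps below.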

void UKF::predictState(const Image<double>& noise)
{
  if (itsUnstable) return;

  //Get the locations to sample from
  itsSigmaLocations = getSigmaLocations(itsState, itsSigma, itsGamma);

  //Predict the new state by simulating the process at the sigma points
  //and then computing a new mean as a weighted sum
  itsNewStates = Image<double>(itsSigmaLocations.getDims(), ZEROS);
  Image<double> predictedState(1, itsNumStates, ZEROS);
  for(int k=0; k<itsSigmaLocations.getWidth(); k++)
  {
    Image<double> state = getNextState(itsSigmaLocations, k);
    ASSERT(state.size() == (uint)itsNumStates);
    predictedState += state*itsMuWeight[k];

    for(uint i=0; i<state.size(); i++)
      itsNewStates.setVal(k, i, state[i]);
  }

  //Compute the predicted covariance in a similar manner
  Image<double> predictedSigma(itsSigma.getDims(), ZEROS);
  for(int k=0; k<itsNewStates.getWidth(); k++)
  {
    //Calculate the variance between the new states and the predicted state
    //Could be optimized by unrolling the matrix ops
    Image<double> diff(1, predictedState.size(), ZEROS);
    for(uint i=0; i<diff.size(); i++)
      diff[i] = itsNewStates.getVal(k,i) - predictedState[i];

    Image<double> variance = matrixMult(diff, transpose(diff));

    predictedSigma += variance*itsSigmaWeight[k];
  }

  //Add the process noise: the one passed in, or the default itsR
  if (noise.initialized())
    predictedSigma += noise;
  else
    predictedSigma += itsR;

  itsState = predictedState;
  itsSigma = predictedSigma;
}

void UKF::predictObservation(const Image<double>& noise)
{
  if (itsUnstable) return;

  //Predict the observation by simulating the measurement process at the
  //sigma points and then computing a new mean as a weighted sum
  itsNewZ = Image<double>(itsNewStates.getWidth(),
                          itsNumObservations, ZEROS);
  Image<double> predictedZ(1, itsNumObservations, ZEROS);

  //At each predicted state, predict the observation
  for(int k=0; k<itsNewStates.getWidth(); k++)
  {
    Image<double> observation = getObservation(itsNewStates, k);
    ASSERT(observation.size() == (uint)itsNumObservations);
    predictedZ += observation*itsMuWeight[k];
    for(uint i=0; i<observation.size(); i++)
      itsNewZ.setVal(k, i, observation[i]);
  }

  //Predict the measurement covariance
  Image<double> predictedZSigma(itsNumObservations, itsNumObservations, ZEROS);
  for(int k=0; k<itsNewStates.getWidth(); k++)
  {
    //Could be optimized by unrolling the matrix ops
    Image<double> diff(1, predictedZ.size(), ZEROS);
    for(uint i=0; i<diff.size(); i++)
      diff[i] = itsNewZ.getVal(k,i) - predictedZ[i];
    Image<double> variance = matrixMult(diff, transpose(diff));
    predictedZSigma += variance*itsSigmaWeight[k];
  }

  //Add the measurement noise: the one passed in, or the default itsQ
  if (noise.initialized())
    predictedZSigma += noise;
  else
    predictedZSigma += itsQ;

  itsPredictedZ = predictedZ;
  itsPredictedZSigma = predictedZSigma;
}
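
// getLikelihood() below evaluates the Gaussian density of a measurement z
// under the predicted measurement distribution N(itsPredictedZ, S):
//
//   p(z) = (2*pi)^(-m/2) * |S|^(-1/2)
//          * exp(-0.5 * (z - zPred)' * inv(S) * (z - zPred))
//
// where m = itsNumObservations and S is itsPredictedZSigma plus any extra
// observation noise passed in by the caller.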

double UKF::getLikelihood(const Image<double>& z, const Image<double>& observationNoise)
{
  if (itsUnstable) return 0;

  if (!itsPredictedZSigma.initialized())
    return 0;

  //Add any extra observation noise to the predicted measurement covariance
  Image<double> sigma = itsPredictedZSigma;
  if (observationNoise.initialized())
    sigma += observationNoise;

  //Make symmetric (currently disabled)
  //sigma = sigma+transpose(sigma);
  //sigma = sigma*0.5;

  double normalizer = itsGaussNormalizer * (1/sqrt(lapack::det(&sigma)));

  //Calculate the innovation
  Image<double> innov = z - itsPredictedZ;

  try
  {
    Image<double> probMat =
      matrixMult(transpose(innov), matrixMult(matrixInv(sigma), innov));
    return normalizer*exp(-0.5*probMat.getVal(0,0));
  } catch (...)
  {
    //The matrix inversion failed; mark the filter as unstable
    itsUnstable = true;
  }

  return 0;
}

void UKF::update(const Image<double>& z, const Image<double>& noise)
{
  if (itsUnstable) return;

  //Calculate the cross covariance between the state and the observation
  Image<double> Pxy(itsNumObservations, itsNumStates, ZEROS);
  for(int k=0; k<itsNewStates.getWidth(); k++)
  {
    //TODO: these differences were already computed in the predict steps;
    //caching them there would save recomputing them here
    Image<double> stateDiff(1, itsNumStates, NO_INIT);
    for(uint i=0; i<stateDiff.size(); i++)
      stateDiff[i] = itsNewStates.getVal(k,i) - itsState[i];

    Image<double> zDiff(1, itsNumObservations, NO_INIT);
    for(uint i=0; i<zDiff.size(); i++)
      zDiff[i] = itsNewZ.getVal(k,i) - itsPredictedZ[i];

    Image<double> variance = matrixMult(stateDiff, transpose(zDiff));
    Pxy += variance*itsSigmaWeight[k];
  }

  try
  {
    Image<double> sigma = itsPredictedZSigma;
    //if (noise.initialized()) //Add the noise if available
    //  sigma += noise;

    //Calculate the Kalman gain
    Image<double> K = matrixMult(Pxy, matrixInv(sigma));

    //Calculate the innovation
    Image<double> innov = z - itsPredictedZ;

    //Update the state and covariance
    itsState += matrixMult(K, innov);
    itsSigma -= matrixMult(K, transpose(Pxy));
  } catch (...)
  {
    //The matrix inversion failed; mark the filter as unstable
    itsUnstable = true;
  }
}

Image<double> UKF::getSigmaLocations(const Image<double>& state,
                                     const Image<double>& sigma, double gamma)
{
  //Use the Cholesky decomposition instead of a matrix square root for stability
  Image<double> A = sigma;
  //Make symmetric (currently disabled)
  //A = A+transpose(A);
  //A = A*0.5;
  Image<double> SChol = lapack::dpotrf(&A);

  //Reset the lower triangle to 0 (matlab's chol function does this as well)
  for(int j=0; j<SChol.getHeight(); j++)
    for(int i=0; i<j; i++)
      SChol.setVal(i, j, 0);

  A = transpose(SChol)*gamma;

  //Build the 2n+1 sigma points: the mean itself, followed by the mean
  //plus/minus each scaled column of the Cholesky factor
  Image<double> X(state.size()*2 + 1, state.size(), ZEROS);

  for(int j=0; j<X.getHeight(); j++)
    X.setVal(0, j, state[j]);

  for(int i=0; i<A.getWidth(); i++)
    for(int j=0; j<A.getHeight(); j++)
      X.setVal(1+i, j, state[j]+A.getVal(i,j));

  for(int i=0; i<A.getWidth(); i++)
    for(int j=0; j<A.getHeight(); j++)
      X.setVal(1+state.size()+i, j, state[j]-A.getVal(i,j));

  return X;
}

// ######################################################################
/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */

#endif
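
// ######################################################################
// Illustrative usage sketch (comment only, not compiled). The class name,
// state layout and tuning values below are made up, and the exact
// declarations of the getNextState()/getObservation() hooks and of the
// members itsState, itsSigma, itsR and itsQ are assumed to live in
// BayesFilters/UKF.H as suggested by the calls above.
//
//   class MyTracker : public UKF
//   {
//   public:
//     MyTracker() : UKF(2, 1, 1.0, 1.0, 2.0) //2 states (pos,vel), 1 observation
//     { /* initialize itsState, itsSigma, itsR and itsQ here */ }
//
//     Image<double> getNextState(const Image<double>& X, int k)
//     {
//       //Constant-velocity process model applied to sigma point k
//       double pos = X.getVal(k,0), vel = X.getVal(k,1);
//       Image<double> state(1, 2, ZEROS);
//       state[0] = pos + vel;
//       state[1] = vel;
//       return state;
//     }
//
//     Image<double> getObservation(const Image<double>& X, int k)
//     {
//       //We only observe the position
//       Image<double> z(1, 1, ZEROS);
//       z[0] = X.getVal(k,0);
//       return z;
//     }
//   };
//
//   //Per time step, with zNew holding the new 1x1 measurement:
//   //  tracker.predictState(Image<double>());       //use default itsR
//   //  tracker.predictObservation(Image<double>()); //use default itsQ
//   //  double prob = tracker.getLikelihood(zNew, Image<double>());
//   //  tracker.update(zNew, Image<double>());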