diff --git a/CHANGELOG.md b/CHANGELOG.md
index 44eee385..ab9c0d30 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,8 @@
 - Implemented method GaussianMixture::resize().
 - Implemented method Gaussian::resize().
 - Implemented method ParticleSet::resize().
+- Implemented evaluation of a multivariate Gaussian probability density function in method utils::multivariate_gaussian_density.
+- Implemented evaluation of the logarithm of a multivariate Gaussian probability density function in method utils::multivariate_gaussian_log_density.
 - Methods EstimatesExtraction::extract() return a std::pair containing a boolean indicating if the estimate is valid and the extracted estimate.
 - Methods EstimatesExtraction::extract() assume that particle weights are in the log space.
 - Constructor HistoryBuffer::HistoryBuffer() takes the state size.
@@ -23,9 +25,14 @@
 ##### `Filtering functions`
 - Added pure public virtual method GaussianPrediction::getStateModel() (required to properly implement GPFPrediction::getStateModel()).
 - Implemented method KFPrediction::getStateModel().
+- Implemented method KFCorrection::getLikelihood().
 - Implemented method UKFPrediction::getStateModel().
+- Implemented method UKFCorrection::getLikelihood().
+- Changed implementation of GaussianLikelihood::likelihood().
 - Changed implementation of GPFPrediction::getStateModel().
 - Changed implementation of GPFCorrection::getLikelihood().
+- Changed implementation of GPFCorrection::evaluateProposal().
+- Changed implementation of WhiteNoiseAcceleration::getTransitionProbability().
 - Fixed missing const(s) keywords in signature of method StateModel::getTransitionProbability().
 - Fixed missing const(s) keywords in signature of method WhiteNoiseAcceleration::getTransitionProbability().
 - SUKFCorrection::getNoiseCovarianceMatrix() is now virtual.
diff --git a/src/BayesFilters/include/BayesFilters/KFCorrection.h b/src/BayesFilters/include/BayesFilters/KFCorrection.h
index 1dbfaf32..bc1490ce 100644
--- a/src/BayesFilters/include/BayesFilters/KFCorrection.h
+++ b/src/BayesFilters/include/BayesFilters/KFCorrection.h
@@ -30,11 +30,16 @@ class bfl::KFCorrection : public GaussianCorrection
 
     MeasurementModel& getMeasurementModel() override;
 
+    std::pair<bool, Eigen::VectorXd> getLikelihood() override;
+
 protected:
     void correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state) override;
 
-private:
     std::unique_ptr<LinearMeasurementModel> measurement_model_;
+
+    Eigen::MatrixXd innovations_;
+
+    GaussianMixture meas_covariances_;
 };
 
 #endif /* KFCORRECTION_H */
diff --git a/src/BayesFilters/include/BayesFilters/UKFCorrection.h b/src/BayesFilters/include/BayesFilters/UKFCorrection.h
index 0c2ece65..1b77b225 100644
--- a/src/BayesFilters/include/BayesFilters/UKFCorrection.h
+++ b/src/BayesFilters/include/BayesFilters/UKFCorrection.h
@@ -34,6 +34,8 @@ class bfl::UKFCorrection : public GaussianCorrection
 
     MeasurementModel& getMeasurementModel() override;
 
+    std::pair<bool, Eigen::VectorXd> getLikelihood() override;
+
 protected:
     void correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state) override;
 
@@ -53,6 +55,10 @@ class bfl::UKFCorrection : public GaussianCorrection
      * Unscented transform weight.
      */
     sigma_point::UTWeight ut_weight_;
+
+    Eigen::MatrixXd innovations_;
+
+    GaussianMixture predicted_meas_;
 };
 
 #endif /* UKFCORRECTION_H */
diff --git a/src/BayesFilters/include/BayesFilters/utils.h b/src/BayesFilters/include/BayesFilters/utils.h
index 6dbc189f..21bf3452 100644
--- a/src/BayesFilters/include/BayesFilters/utils.h
+++ b/src/BayesFilters/include/BayesFilters/utils.h
@@ -61,6 +61,44 @@ std::unique_ptr<T> make_unique(Args&& ...args)
 double log_sum_exp(const Eigen::Ref<const Eigen::VectorXd>& arguments);
 
 
+/**
+ * Evaluate the logarithm of a multivariate Gaussian probability density function.
+ *
+ * @param input Input representing the argument of the function as a vector or matrix.
+ * @param mean The mean of the associated Gaussian distribution as a vector.
+ * @param covariance The covariance matrix of the associated Gaussian distribution as a matrix.
+ *
+ * @return The value of the logarithm of the density function evaluated on the input data as a vector.
+ */
+template<typename Derived>
+Eigen::VectorXd multivariate_gaussian_log_density(const Eigen::MatrixBase<Derived>& input, const Eigen::Ref<const Eigen::VectorXd>& mean, const Eigen::Ref<const Eigen::MatrixXd>& covariance)
+{
+    const auto diff = input.colwise() - mean;
+
+    Eigen::VectorXd values(diff.cols());
+    for (std::size_t i = 0; i < diff.cols(); i++)
+        values(i) = - 0.5 * (static_cast<double>(diff.rows()) * std::log(2.0 * M_PI) + std::log(covariance.determinant()) + (diff.col(i).transpose() * covariance.inverse() * diff.col(i)));
+
+    return values;
+}
+
+
+/**
+ * Evaluate a multivariate Gaussian probability density function.
+ *
+ * @param input Input representing the argument of the function as a vector or matrix.
+ * @param mean The mean of the associated Gaussian distribution as a vector.
+ * @param covariance The covariance matrix of the associated Gaussian distribution as a matrix.
+ *
+ * @return The value of the density function evaluated on the input data as a vector.
+ */
+template<typename Derived>
+Eigen::VectorXd multivariate_gaussian_density(const Eigen::MatrixBase<Derived>& input, const Eigen::Ref<const Eigen::VectorXd>& mean, const Eigen::Ref<const Eigen::MatrixXd>& covariance)
+{
+    return multivariate_gaussian_log_density(input, mean, covariance).array().exp();
+}
+
+
 /**
  * This template class provides methods to keep track of time. The default time unit is milliseconds,
  * but can be changed during object creation using a different std::chrono::duration type.
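The two density helpers added to utils.h above are header-only templates, so they can be exercised directly against Eigen types. The sketch below is illustrative and not part of the patch: the numeric values are arbitrary, and it assumes the helpers live in the bfl::utils namespace and are reachable through the <BayesFilters/utils.h> include path used elsewhere in this diff.

#include <iostream>

#include <BayesFilters/utils.h>

#include <Eigen/Dense>

int main()
{
    /* Mean and covariance of a 2-dimensional Gaussian. */
    Eigen::VectorXd mean(2);
    mean << 1.0, -1.0;

    Eigen::MatrixXd covariance(2, 2);
    covariance << 0.5, 0.0,
                  0.0, 2.0;

    /* Each column of 'inputs' is one point at which the density is evaluated. */
    Eigen::MatrixXd inputs(2, 3);
    inputs << 1.0, 0.0,  2.0,
             -1.0, 0.0, -2.0;

    /* One (log-)density value per input column, matching the column-wise loop above. */
    Eigen::VectorXd densities = bfl::utils::multivariate_gaussian_density(inputs, mean, covariance);
    Eigen::VectorXd log_densities = bfl::utils::multivariate_gaussian_log_density(inputs, mean, covariance);

    std::cout << "densities:     " << densities.transpose() << std::endl;
    std::cout << "log densities: " << log_densities.transpose() << std::endl;

    return 0;
}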
diff --git a/src/BayesFilters/src/GPFCorrection.cpp b/src/BayesFilters/src/GPFCorrection.cpp
index f7e9de2c..6f40bd0b 100644
--- a/src/BayesFilters/src/GPFCorrection.cpp
+++ b/src/BayesFilters/src/GPFCorrection.cpp
@@ -6,6 +6,7 @@
  */
 
 #include <BayesFilters/GPFCorrection.h>
+#include <BayesFilters/utils.h>
 
 #include <Eigen/Cholesky>
 
@@ -144,7 +145,5 @@ double GPFCorrection::evaluateProposal
 {
     /* Evaluate the proposal distribution, a Gaussian centered in 'mean' and having
        covariance 'covariance', in the state 'state'. */
-    VectorXd difference = state - mean;
-
-    return (-0.5 * static_cast<double>(difference.size()) * log(2.0 * M_PI) -0.5 * log(covariance.determinant()) -0.5 * (difference.transpose() * covariance.inverse() * difference).array()).exp().coeff(0);
+    return utils::multivariate_gaussian_density(state, mean, covariance).coeff(0);
 }
diff --git a/src/BayesFilters/src/GaussianLikelihood.cpp b/src/BayesFilters/src/GaussianLikelihood.cpp
index 0704bede..7609cbc4 100644
--- a/src/BayesFilters/src/GaussianLikelihood.cpp
+++ b/src/BayesFilters/src/GaussianLikelihood.cpp
@@ -6,6 +6,7 @@
  */
 
 #include <BayesFilters/GaussianLikelihood.h>
+#include <BayesFilters/utils.h>
 
 using namespace bfl;
 using namespace Eigen;
@@ -68,9 +69,7 @@ std::pair<bool, VectorXd> GaussianLikelihood::likelihood
 
     if (!valid_covariance_matrix)
         return std::make_pair(false, VectorXd::Zero(1));
-
-    for (unsigned int i = 0; i < innovations.cols(); ++i)
-        likelihood(i) = scale_factor_ * (-0.5 * static_cast<double>(innovations.rows()) * log(2.0*M_PI) - 0.5 * log(covariance_matrix.determinant()) - 0.5 * (innovations.col(i).transpose() * covariance_matrix.inverse() * innovations.col(i)).array()).exp().coeff(0);
+    likelihood = scale_factor_ * utils::multivariate_gaussian_density(innovations, VectorXd::Zero(innovations.rows()), covariance_matrix);
 
     return std::make_pair(true, likelihood);
 }
diff --git a/src/BayesFilters/src/KFCorrection.cpp b/src/BayesFilters/src/KFCorrection.cpp
index 78299bb5..3be5865d 100644
--- a/src/BayesFilters/src/KFCorrection.cpp
+++ b/src/BayesFilters/src/KFCorrection.cpp
@@ -6,6 +6,7 @@
  */
 
 #include <BayesFilters/KFCorrection.h>
+#include <BayesFilters/utils.h>
 
 using namespace bfl;
 using namespace Eigen;
@@ -31,6 +32,21 @@ MeasurementModel& KFCorrection::getMeasurementModel()
 }
 
 
+std::pair<bool, VectorXd> KFCorrection::getLikelihood()
+{
+    if ((innovations_.rows() == 0) || (innovations_.cols() == 0))
+        return std::make_pair(false, VectorXd());
+
+    VectorXd likelihood(innovations_.cols());
+    for (std::size_t i = 0; i < likelihood.size(); i++)
+    {
+        likelihood(i) = utils::multivariate_gaussian_density(innovations_.col(i), VectorXd::Zero(innovations_.rows()), meas_covariances_.covariance(i)).coeff(0);
+    }
+
+    return std::make_pair(true, likelihood);
+}
+
+
 void KFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
 {
     /* Get the current measurement if available. */
@@ -79,25 +95,29 @@ void KFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtur
     MatrixXd H = measurement_model_->getMeasurementMatrix();
 
     /* Cast innovations once for all. */
-    MatrixXd innovations = any::any_cast<MatrixXd>(std::move(innovation));
+    innovations_ = any::any_cast<MatrixXd>(std::move(innovation));
+
+    /* Initialize measurement covariances.
+       GaussianMixture will effectively resize only if it needs to. */
+    meas_covariances_.resize(pred_state.components, H.rows());
 
     /* Process all the components in the mixture. */
-    for (size_t i=0; i < pred_state.components; i++)
+    for (size_t i = 0; i < pred_state.components; i++)
     {
         /* Evaluate the measurement covariance matrix
            Py = H * Px * H' + R */
-        MatrixXd Py = H * pred_state.covariance(i) * H.transpose() + R;
+        meas_covariances_.covariance(i) = H * pred_state.covariance(i) * H.transpose() + R;
 
         /* Evaluate the Kalman Gain
            K = Px * H' * (Py)^{-1} */
-        MatrixXd K = pred_state.covariance(i) * H.transpose() * Py.inverse();
+        MatrixXd K = pred_state.covariance(i) * H.transpose() * meas_covariances_.covariance(i).inverse();
 
         /* Evaluate the filtered mean
            x_{k}+ = x{k}- + K * (y - y_predicted) */
-        corr_state.mean(i) = pred_state.mean(i) + K * innovations.col(i);
+        corr_state.mean(i) = pred_state.mean(i) + K * innovations_.col(i);
 
         /* Evaluate the filtered covariance
            P_{k}+ = P_{k}- - K * Py * K' */
-        corr_state.covariance(i).noalias() = pred_state.covariance(i) - K * Py * K.transpose();
+        corr_state.covariance(i).noalias() = pred_state.covariance(i) - K * meas_covariances_.covariance(i) * K.transpose();
     }
 }
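For reference, the quantities cached by KFCorrection::correctStep() and reused by getLikelihood() are those of the standard Kalman correction. Writing the i-th predicted mixture component as mean x_i^- with covariance P_i^-, and using the measurement matrix H, the measurement noise covariance R and the innovation named in the comments above:

    S_i = H P_i^- H^\top + R
    K_i = P_i^- H^\top S_i^{-1}
    x_i^+ = x_i^- + K_i (y - \hat{y}_i)
    P_i^+ = P_i^- - K_i S_i K_i^\top
    p(y \mid x_i^-) = \mathcal{N}(y - \hat{y}_i; 0, S_i)

Here S_i is what correctStep() stores in meas_covariances_.covariance(i), the innovation y - \hat{y}_i is innovations_.col(i), and getLikelihood() evaluates the last line for each component through utils::multivariate_gaussian_density.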
diff --git a/src/BayesFilters/src/UKFCorrection.cpp b/src/BayesFilters/src/UKFCorrection.cpp
index b6d1da98..997c1fee 100644
--- a/src/BayesFilters/src/UKFCorrection.cpp
+++ b/src/BayesFilters/src/UKFCorrection.cpp
@@ -6,6 +6,7 @@
  */
 
 #include <BayesFilters/UKFCorrection.h>
+#include <BayesFilters/utils.h>
 
 using namespace bfl;
 using namespace bfl::sigma_point;
@@ -61,6 +62,21 @@ MeasurementModel& UKFCorrection::getMeasurementModel()
 }
 
 
+std::pair<bool, VectorXd> UKFCorrection::getLikelihood()
+{
+    if ((innovations_.rows() == 0) || (innovations_.cols() == 0))
+        return std::make_pair(false, VectorXd());
+
+    VectorXd likelihood(innovations_.cols());
+    for (std::size_t i = 0; i < innovations_.cols(); i++)
+    {
+        likelihood(i) = utils::multivariate_gaussian_density(innovations_.col(i), VectorXd::Zero(innovations_.rows()), predicted_meas_.covariance(i)).coeff(0);
+    }
+
+    return std::make_pair(true, likelihood);
+}
+
+
 void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
 {
     /* Pick the correct measurement model. */
@@ -80,7 +96,8 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtu
     /* Initialize predicted measurement GaussianMixture. */
     std::pair<std::size_t, std::size_t> meas_sizes = model.getOutputSize();
     std::size_t meas_size = meas_sizes.first + meas_sizes.second;
-    GaussianMixture pred_meas(pred_state.components, meas_size);
+    /* GaussianMixture will effectively resize only if it needs to. */
+    predicted_meas_.resize(pred_state.components, meas_size);
 
     /* Evaluate the joint state-measurement statistics, if possible. */
     bool valid = false;
@@ -94,11 +111,11 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtu
         std::tie(std::ignore, noise_covariance_matrix) = model.getNoiseCovarianceMatrix();
         pred_state_augmented.augmentWithNoise(noise_covariance_matrix);
 
-        std::tie(valid, pred_meas, Pxy) = sigma_point::unscented_transform(pred_state_augmented, ut_weight_, *measurement_model_);
+        std::tie(valid, predicted_meas_, Pxy) = sigma_point::unscented_transform(pred_state_augmented, ut_weight_, *measurement_model_);
     }
     else if (type_ == UKFCorrectionType::Additive)
     {
-        std::tie(valid, pred_meas, Pxy) = sigma_point::unscented_transform(pred_state, ut_weight_, *additive_measurement_model_);
+        std::tie(valid, predicted_meas_, Pxy) = sigma_point::unscented_transform(pred_state, ut_weight_, *additive_measurement_model_);
     }
 
     if (!valid)
@@ -113,8 +130,8 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtu
 
     /* This temporary is required since some MeasurementModel::innovation methods may try to cast
        from const Ref<const MatrixXd> to MatrixXd resulting in a bfl::any::bad_any_cast.
-       Hopefully, using std::move, it is possible to steal the memory from pred_meas.mean(). */
-    MatrixXd y_p = std::move(pred_meas.mean());
+       Hopefully, using std::move, it is possible to steal the memory from predicted_meas_.mean(). */
+    MatrixXd y_p = std::move(predicted_meas_.mean());
     std::tie(valid_innovation, innovation) = model.innovation(y_p, measurement);
 
     if (!valid_innovation)
@@ -124,21 +141,21 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtu
     }
 
     /* Cast innovations once for all. */
-    MatrixXd innovations = any::any_cast<MatrixXd>(std::move(innovation));
+    innovations_ = any::any_cast<MatrixXd>(std::move(innovation));
 
     /* Process all the components in the mixture. */
     for (size_t i=0; i < pred_state.components; i++)
     {
         /* Evaluate the Kalman Gain
            K = Pxy * (Py)^{-1} */
-        MatrixXd K = Pxy.middleCols(meas_size * i, meas_size) * pred_meas.covariance(i).inverse();
+        MatrixXd K = Pxy.middleCols(meas_size * i, meas_size) * predicted_meas_.covariance(i).inverse();
 
         /* Evaluate the filtered mean.
            x_{k}+ = x{k}- + K * innovation */
-        corr_state.mean(i) = pred_state.mean(i) + K * innovations.col(i);
+        corr_state.mean(i) = pred_state.mean(i) + K * innovations_.col(i);
 
         /* Evaluate the filtered covariance
            P_{k}+ = P_{k}- - K * Py * K' */
-        corr_state.covariance(i) = pred_state.covariance(i) - K * pred_meas.covariance(i) * K.transpose();
+        corr_state.covariance(i) = pred_state.covariance(i) - K * predicted_meas_.covariance(i) * K.transpose();
     }
 }
diff --git a/src/BayesFilters/src/WhiteNoiseAcceleration.cpp b/src/BayesFilters/src/WhiteNoiseAcceleration.cpp
index 25b6590c..c2e03f03 100644
--- a/src/BayesFilters/src/WhiteNoiseAcceleration.cpp
+++ b/src/BayesFilters/src/WhiteNoiseAcceleration.cpp
@@ -6,6 +6,7 @@
  */
 
 #include <BayesFilters/WhiteNoiseAcceleration.h>
+#include <BayesFilters/utils.h>
 
 #include <cmath>
 #include <utility>
@@ -139,16 +140,7 @@ MatrixXd WhiteNoiseAcceleration::getStateTransitionMatrix()
 
 
 VectorXd WhiteNoiseAcceleration::getTransitionProbability(const Ref<const MatrixXd>& prev_states, const Ref<const MatrixXd>& cur_states)
 {
-    VectorXd probabilities(prev_states.cols());
-    MatrixXd differences = cur_states - prev_states;
-
-    std::size_t size = differences.rows();
-    for (std::size_t i = 0; i < prev_states.cols(); i++)
-    {
-        probabilities(i) = (-0.5 * static_cast<double>(size) * log(2.0 * M_PI) + -0.5 * log(Q_.determinant()) -0.5 * (differences.col(i).transpose() * Q_.inverse() * differences.col(i)).array()).exp().coeff(0);
-    }
-
-    return probabilities;
+    return utils::multivariate_gaussian_density(prev_states, cur_states.col(0), Q_);
 }
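The rewritten getTransitionProbability() relies on the symmetry of the Gaussian density: evaluating the columns of prev_states around the single current state cur_states.col(0) gives the same values as the removed difference-based loop. The sketch below is illustrative and not part of the patch; the state dimension, the number of states and the noise covariance Q are made-up values, and reference_transition_probability is a hypothetical helper that mirrors the removed loop.

#include <cmath>
#include <iostream>

#include <BayesFilters/utils.h>

#include <Eigen/Dense>

/* Hypothetical reference mirroring the removed per-column loop:
   evaluates N(cur - prev_i; 0, Q) for every column prev_i of 'prev'. */
Eigen::VectorXd reference_transition_probability(const Eigen::MatrixXd& prev,
                                                 const Eigen::VectorXd& cur,
                                                 const Eigen::MatrixXd& Q)
{
    Eigen::VectorXd probabilities(prev.cols());
    for (Eigen::Index i = 0; i < prev.cols(); i++)
    {
        const Eigen::VectorXd difference = cur - prev.col(i);
        const Eigen::MatrixXd quadratic_form = difference.transpose() * Q.inverse() * difference;
        probabilities(i) = std::exp(-0.5 * (static_cast<double>(difference.size()) * std::log(2.0 * M_PI)
                                            + std::log(Q.determinant()) + quadratic_form(0, 0)));
    }

    return probabilities;
}

int main()
{
    /* Made-up 4-dimensional setup playing the role of the white noise acceleration model. */
    const Eigen::MatrixXd Q = 0.1 * Eigen::MatrixXd::Identity(4, 4);
    const Eigen::MatrixXd prev_states = Eigen::MatrixXd::Random(4, 5);
    const Eigen::VectorXd cur_state = Eigen::VectorXd::Random(4);

    /* N(prev_i - cur; 0, Q) equals N(cur - prev_i; 0, Q), so this single call reproduces
       the removed loop over the columns of prev_states. */
    const Eigen::VectorXd via_utils = bfl::utils::multivariate_gaussian_density(prev_states, cur_state, Q);
    const Eigen::VectorXd reference = reference_transition_probability(prev_states, cur_state, Q);

    std::cout << "max difference: " << (via_utils - reference).cwiseAbs().maxCoeff() << std::endl;

    return 0;
}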