Merged
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -12,6 +12,8 @@
- Implemented method GaussianMixture::resize().
- Implemented method Gaussian::resize().
- Implemented method ParticleSet::resize().
- Implemented evaluation of a multivariate Gaussian probability density function in utils::multivariate_gaussian_density.
- Implemented evaluation of the logarithm of a multivariate Gaussian probability density function in utils::multivariate_gaussian_log_density.
- Methods EstimatesExtraction::extract() return a std::pair containing a boolean indicating whether the estimate is valid and the extracted estimate.
- Methods EstimatesExtraction::extract() assume that particle weights are in the log space.
- Constructor HistoryBuffer::HistoryBuffer() takes the state size.
@@ -23,9 +25,14 @@
##### `Filtering functions`
- Added public pure virtual method GaussianPrediction::getStateModel() (required to properly implement GPFPrediction::getStateModel()).
- Implemented method KFPrediction::getStateModel().
- Implemented method KFCorrection::getLikelihood().
- Implemented method UKFPrediction::getStateModel().
- Implemented method UKFCorrection::getLikelihood().
- Changed implementation of GaussianLikelihood::likelihood().
- Changed implementation of GPFPrediction::getStateModel().
- Changed implementation of GPFCorrection::getLikelihood().
- Changed implementation of GPFCorrection::evaluateProposal().
- Changed implementation of WhiteNoiseAcceleration::getTransitionProbability().
- Fixed missing const keywords in the signature of method StateModel::getTransitionProbability().
- Fixed missing const keywords in the signature of method WhiteNoiseAcceleration::getTransitionProbability().
- SUKFCorrection::getNoiseCovarianceMatrix() is now virtual.
7 changes: 6 additions & 1 deletion src/BayesFilters/include/BayesFilters/KFCorrection.h
@@ -30,11 +30,16 @@ class bfl::KFCorrection : public GaussianCorrection

MeasurementModel& getMeasurementModel() override;

std::pair<bool, Eigen::VectorXd> getLikelihood() override;

protected:
void correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state) override;

private:
std::unique_ptr<LinearMeasurementModel> measurement_model_;

Eigen::MatrixXd innovations_;

GaussianMixture meas_covariances_;
};

#endif /* KFCORRECTION_H */
6 changes: 6 additions & 0 deletions src/BayesFilters/include/BayesFilters/UKFCorrection.h
@@ -34,6 +34,8 @@ class bfl::UKFCorrection : public GaussianCorrection

MeasurementModel& getMeasurementModel() override;

std::pair<bool, Eigen::VectorXd> getLikelihood() override;

protected:
void correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state) override;

@@ -53,6 +55,10 @@ class bfl::UKFCorrection : public GaussianCorrection
* Unscented transform weight.
*/
sigma_point::UTWeight ut_weight_;

/**
 * Innovation terms computed during the last correction step, one column per mixture component.
 */
Eigen::MatrixXd innovations_;

/**
 * Predicted measurement mixture computed during the last correction step.
 */
GaussianMixture predicted_meas_;
};

#endif /* UKFCORRECTION_H */
38 changes: 38 additions & 0 deletions src/BayesFilters/include/BayesFilters/utils.h
@@ -61,6 +61,44 @@ std::unique_ptr<T> make_unique(Args&& ...args)
double log_sum_exp(const Eigen::Ref<const Eigen::VectorXd>& arguments);


/**
* Evaluate the logarithm of a multivariate Gaussian probability density function.
*
* @param input The evaluation points, either a single vector or a matrix whose columns are evaluated independently.
* @param mean The mean of the associated Gaussian distribution as a vector.
* @param covariance The covariance matrix of the associated Gaussian distribution as a matrix.
*
* @return A vector containing the logarithm of the density evaluated at each input column.
*/
template<typename Derived>
Eigen::VectorXd multivariate_gaussian_log_density(const Eigen::MatrixBase<Derived>& input, const Eigen::Ref<const Eigen::VectorXd>& mean, const Eigen::Ref<const Eigen::MatrixXd>& covariance)
{
const auto diff = input.colwise() - mean;

Eigen::VectorXd values(diff.cols());
for (std::size_t i = 0; i < diff.cols(); i++)
values(i) = - 0.5 * (static_cast<double>(diff.rows()) * std::log(2.0 * M_PI) + std::log(covariance.determinant()) + (diff.col(i).transpose() * covariance.inverse() * diff.col(i)));

return values;
}


/**
* Evaluate a multivariate Gaussian probability density function.
*
* @param input The evaluation points, either a single vector or a matrix whose columns are evaluated independently.
* @param mean The mean of the associated Gaussian distribution as a vector.
* @param covariance The covariance matrix of the associated Gaussian distribution as a matrix.
*
* @return A vector containing the density evaluated at each input column.
*/
template<typename Derived>
Eigen::VectorXd multivariate_gaussian_density(const Eigen::MatrixBase<Derived>& input, const Eigen::Ref<const Eigen::VectorXd>& mean, const Eigen::Ref<const Eigen::MatrixXd>& covariance)
{
return multivariate_gaussian_log_density(input, mean, covariance).array().exp();
}


/**
* This template class provides methods to keep track of time. The default time unit is milliseconds,
* but can be changed during object creation using a different std::chrono::duration type.
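The two utilities added above, utils::multivariate_gaussian_log_density and utils::multivariate_gaussian_density, evaluate the standard Gaussian log-density, log N(x; μ, Σ) = -0.5 (k·log(2π) + log det Σ + (x − μ)ᵀ Σ⁻¹ (x − μ)), independently for each input column. A minimal usage sketch follows; it is not part of the library and assumes the functions are reachable through the bfl::utils namespace, as suggested by the source files below.

```cpp
#include <BayesFilters/utils.h>

#include <Eigen/Dense>

#include <iostream>

int main()
{
    /* Two evaluation points stored as the columns of a 2 x 2 matrix. */
    Eigen::MatrixXd points(2, 2);
    points << 0.0, 1.0,
              0.0, 1.0;

    /* Zero-mean Gaussian with isotropic covariance. */
    Eigen::VectorXd mean = Eigen::VectorXd::Zero(2);
    Eigen::MatrixXd covariance = 0.5 * Eigen::MatrixXd::Identity(2, 2);

    /* One value per input column. */
    Eigen::VectorXd density = bfl::utils::multivariate_gaussian_density(points, mean, covariance);
    Eigen::VectorXd log_density = bfl::utils::multivariate_gaussian_log_density(points, mean, covariance);

    std::cout << "density: " << density.transpose() << std::endl;
    std::cout << "log-density: " << log_density.transpose() << std::endl;

    return 0;
}
```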
5 changes: 2 additions & 3 deletions src/BayesFilters/src/GPFCorrection.cpp
@@ -6,6 +6,7 @@
*/

#include <BayesFilters/GPFCorrection.h>
#include <BayesFilters/utils.h>

#include <Eigen/Cholesky>

@@ -144,7 +145,5 @@ double GPFCorrection::evaluateProposal
{
/* Evaluate the proposal distribution, a Gaussian centered in 'mean' and having
covariance 'covariance', in the state 'state'. */
VectorXd difference = state - mean;

return (-0.5 * static_cast<double>(difference.size()) * log(2.0 * M_PI) -0.5 * log(covariance.determinant()) -0.5 * (difference.transpose() * covariance.inverse() * difference).array()).exp().coeff(0);
return utils::multivariate_gaussian_density(state, mean, covariance).coeff(0);
}
5 changes: 2 additions & 3 deletions src/BayesFilters/src/GaussianLikelihood.cpp
@@ -6,6 +6,7 @@
*/

#include <BayesFilters/GaussianLikelihood.h>
#include <BayesFilters/utils.h>

using namespace bfl;
using namespace Eigen;
@@ -68,9 +69,7 @@ std::pair<bool, VectorXd> GaussianLikelihood::likelihood
if (!valid_covariance_matrix)
return std::make_pair(false, VectorXd::Zero(1));


for (unsigned int i = 0; i < innovations.cols(); ++i)
likelihood(i) = scale_factor_ * (-0.5 * static_cast<double>(innovations.rows()) * log(2.0*M_PI) - 0.5 * log(covariance_matrix.determinant()) - 0.5 * (innovations.col(i).transpose() * covariance_matrix.inverse() * innovations.col(i)).array()).exp().coeff(0);
likelihood = scale_factor_ * utils::multivariate_gaussian_density(innovations, VectorXd::Zero(innovations.rows()), covariance_matrix);

return std::make_pair(true, likelihood);
}
32 changes: 26 additions & 6 deletions src/BayesFilters/src/KFCorrection.cpp
@@ -6,6 +6,7 @@
*/

#include <BayesFilters/KFCorrection.h>
#include <BayesFilters/utils.h>

using namespace bfl;
using namespace Eigen;
@@ -31,6 +32,21 @@ MeasurementModel& KFCorrection::getMeasurementModel()
}


std::pair<bool, VectorXd> KFCorrection::getLikelihood()
{
if ((innovations_.rows() == 0) || (innovations_.cols() == 0))
return std::make_pair(false, VectorXd());

VectorXd likelihood(innovations_.cols());
for (std::size_t i = 0; i < likelihood.size(); i++)
{
likelihood(i) = utils::multivariate_gaussian_density(innovations_.col(i), VectorXd::Zero(innovations_.rows()), meas_covariances_.covariance(i)).coeff(0);
}

return std::make_pair(true, likelihood);
}


void KFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
{
/* Get the current measurement if available. */
@@ -79,25 +95,29 @@ void KFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
MatrixXd H = measurement_model_->getMeasurementMatrix();

/* Cast innovations once for all. */
MatrixXd innovations = any::any_cast<MatrixXd&&>(std::move(innovation));
innovations_ = any::any_cast<MatrixXd&&>(std::move(innovation));

/* Initialize measurement covariances.
GaussianMixture will effectively resize only if it needs to. */
meas_covariances_.resize(pred_state.components, H.rows());

/* Process all the components in the mixture. */
for (size_t i=0; i < pred_state.components; i++)
for (size_t i = 0; i < pred_state.components; i++)
{
/* Evaluate the measurement covariance matrix
Py = H * Px * H' + R */
MatrixXd Py = H * pred_state.covariance(i) * H.transpose() + R;
meas_covariances_.covariance(i) = H * pred_state.covariance(i) * H.transpose() + R;

/* Evaluate the Kalman Gain
K = Px * H' * (Py)^{-1} */
MatrixXd K = pred_state.covariance(i) * H.transpose() * Py.inverse();
MatrixXd K = pred_state.covariance(i) * H.transpose() * meas_covariances_.covariance(i).inverse();

/* Evaluate the filtered mean
x_{k}+ = x{k}- + K * (y - y_predicted) */
corr_state.mean(i) = pred_state.mean(i) + K * innovations.col(i);
corr_state.mean(i) = pred_state.mean(i) + K * innovations_.col(i);

/* Evaluate the filtered covariance
P_{k}+ = P_{k}- - K * Py * K' */
corr_state.covariance(i).noalias() = pred_state.covariance(i) - K * Py * K.transpose();
corr_state.covariance(i).noalias() = pred_state.covariance(i) - K * meas_covariances_.covariance(i) * K.transpose();
}
}
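The formulas commented in the loop above are the standard Kalman correction equations. Below is a self-contained sketch with plain Eigen that mirrors them for a single Gaussian component, including the per-component likelihood N(innovation; 0, Py) returned by getLikelihood(); the function name and signature are illustrative, not the bfl API, and the utilities are assumed to live in the bfl::utils namespace.

```cpp
#include <BayesFilters/utils.h>

#include <Eigen/Dense>

/* Standalone sketch of a single-component Kalman correction step, mirroring the
   formulas commented in KFCorrection::correctStep() above. */
void kalman_correct_sketch
(
    const Eigen::VectorXd& x_pred,   /* predicted mean x_{k}- */
    const Eigen::MatrixXd& P_pred,   /* predicted covariance P_{k}- */
    const Eigen::MatrixXd& H,        /* measurement matrix */
    const Eigen::MatrixXd& R,        /* measurement noise covariance */
    const Eigen::VectorXd& y,        /* actual measurement */
    Eigen::VectorXd& x_corr,
    Eigen::MatrixXd& P_corr,
    double& likelihood
)
{
    /* Innovation y - y_predicted. */
    Eigen::VectorXd innovation = y - H * x_pred;

    /* Measurement covariance Py = H * Px * H' + R. */
    Eigen::MatrixXd Py = H * P_pred * H.transpose() + R;

    /* Kalman gain K = Px * H' * Py^{-1}. */
    Eigen::MatrixXd K = P_pred * H.transpose() * Py.inverse();

    /* Filtered mean x_{k}+ = x_{k}- + K * innovation. */
    x_corr = x_pred + K * innovation;

    /* Filtered covariance P_{k}+ = P_{k}- - K * Py * K'. */
    P_corr = P_pred - K * Py * K.transpose();

    /* Measurement likelihood N(innovation; 0, Py), as in KFCorrection::getLikelihood(). */
    likelihood = bfl::utils::multivariate_gaussian_density(innovation, Eigen::VectorXd::Zero(innovation.size()), Py).coeff(0);
}
```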
35 changes: 26 additions & 9 deletions src/BayesFilters/src/UKFCorrection.cpp
@@ -6,6 +6,7 @@
*/

#include <BayesFilters/UKFCorrection.h>
#include <BayesFilters/utils.h>

using namespace bfl;
using namespace bfl::sigma_point;
@@ -61,6 +62,21 @@ MeasurementModel& UKFCorrection::getMeasurementModel()
}


std::pair<bool, VectorXd> UKFCorrection::getLikelihood()
{
if ((innovations_.rows() == 0) || (innovations_.cols() == 0))
return std::make_pair(false, VectorXd());

VectorXd likelihood(innovations_.cols());
for (std::size_t i = 0; i < innovations_.cols(); i++)
{
likelihood(i) = utils::multivariate_gaussian_density(innovations_.col(i), VectorXd::Zero(innovations_.rows()), predicted_meas_.covariance(i)).coeff(0);
}

return std::make_pair(true, likelihood);
}


void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
{
/* Pick the correct measurement model. */
@@ -80,7 +96,8 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
/* Initialize predicted measurement GaussianMixture. */
std::pair<std::size_t, std::size_t> meas_sizes = model.getOutputSize();
std::size_t meas_size = meas_sizes.first + meas_sizes.second;
GaussianMixture pred_meas(pred_state.components, meas_size);
/* GaussianMixture will effectively resize only if it needs to. */
predicted_meas_.resize(pred_state.components, meas_size);

/* Evaluate the joint state-measurement statistics, if possible. */
bool valid = false;
Expand All @@ -94,11 +111,11 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixtu
std::tie(std::ignore, noise_covariance_matrix) = model.getNoiseCovarianceMatrix();
pred_state_augmented.augmentWithNoise(noise_covariance_matrix);

std::tie(valid, pred_meas, Pxy) = sigma_point::unscented_transform(pred_state_augmented, ut_weight_, *measurement_model_);
std::tie(valid, predicted_meas_, Pxy) = sigma_point::unscented_transform(pred_state_augmented, ut_weight_, *measurement_model_);
}
else if (type_ == UKFCorrectionType::Additive)
{
std::tie(valid, pred_meas, Pxy) = sigma_point::unscented_transform(pred_state, ut_weight_, *additive_measurement_model_);
std::tie(valid, predicted_meas_, Pxy) = sigma_point::unscented_transform(pred_state, ut_weight_, *additive_measurement_model_);
}

if (!valid)
@@ -113,8 +130,8 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
/* This temporary is required since some MeasurementModel::innovation methods may try to cast from
const Ref<const MatrixXd> to MatrixXd resulting in a bfl::any::bad_any_cast.

Hopefully, using std::move, it is possible to steal the memory from pred_meas.mean(). */
MatrixXd y_p = std::move(pred_meas.mean());
Hopefully, using std::move, it is possible to steal the memory from predicted_meas_.mean(). */
MatrixXd y_p = std::move(predicted_meas_.mean());
std::tie(valid_innovation, innovation) = model.innovation(y_p, measurement);

if (!valid_innovation)
@@ -124,21 +141,21 @@ void UKFCorrection::correctStep(const GaussianMixture& pred_state, GaussianMixture& corr_state)
}

/* Cast innovations once for all. */
MatrixXd innovations = any::any_cast<MatrixXd&&>(std::move(innovation));
innovations_ = any::any_cast<MatrixXd&&>(std::move(innovation));

/* Process all the components in the mixture. */
for (size_t i=0; i < pred_state.components; i++)
{
/* Evaluate the Kalman Gain
K = Pxy * (Py)^{-1} */
MatrixXd K = Pxy.middleCols(meas_size * i, meas_size) * pred_meas.covariance(i).inverse();
MatrixXd K = Pxy.middleCols(meas_size * i, meas_size) * predicted_meas_.covariance(i).inverse();

/* Evaluate the filtered mean.
x_{k}+ = x{k}- + K * innovation */
corr_state.mean(i) = pred_state.mean(i) + K * innovations.col(i);
corr_state.mean(i) = pred_state.mean(i) + K * innovations_.col(i);

/* Evaluate the filtered covariance
P_{k}+ = P_{k}- - K * Py * K' */
corr_state.covariance(i) = pred_state.covariance(i) - K * pred_meas.covariance(i) * K.transpose();
corr_state.covariance(i) = pred_state.covariance(i) - K * predicted_meas_.covariance(i) * K.transpose();
}
}
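For readers unfamiliar with sigma_point::unscented_transform, the sketch below implements a generic scaled unscented transform (the standard Julier-Uhlmann / van der Merwe formulation) with plain Eigen. It only illustrates the idea used in correctStep() above: it is not the library's implementation, and the function name, signature and parameters (alpha, beta, kappa) are assumptions of this generic formulation.

```cpp
#include <Eigen/Dense>

#include <functional>
#include <vector>

/* Generic scaled unscented transform: propagate 2n+1 sigma points through a
   measurement function h and recover the mean and covariance of the output. */
void unscented_transform_sketch
(
    const Eigen::VectorXd& mean,
    const Eigen::MatrixXd& covariance,
    const std::function<Eigen::VectorXd(const Eigen::VectorXd&)>& h,
    double alpha, double beta, double kappa,
    Eigen::VectorXd& y_mean,
    Eigen::MatrixXd& y_cov
)
{
    const int n = static_cast<int>(mean.size());
    const double lambda = alpha * alpha * (n + kappa) - n;
    const double c = n + lambda;

    /* Matrix square root of (c * covariance) via Cholesky (covariance must be positive definite). */
    const Eigen::MatrixXd sqrt_cP = (c * covariance).llt().matrixL();

    /* 2n + 1 sigma points around the mean. */
    std::vector<Eigen::VectorXd> sigma_points(2 * n + 1, mean);
    for (int i = 0; i < n; i++)
    {
        sigma_points[1 + i]     += sqrt_cP.col(i);
        sigma_points[1 + n + i] -= sqrt_cP.col(i);
    }

    /* Mean and covariance weights. */
    Eigen::VectorXd w_mean = Eigen::VectorXd::Constant(2 * n + 1, 1.0 / (2.0 * c));
    Eigen::VectorXd w_cov = w_mean;
    w_mean(0) = lambda / c;
    w_cov(0) = lambda / c + (1.0 - alpha * alpha + beta);

    /* Propagate the sigma points through the measurement function. */
    std::vector<Eigen::VectorXd> y(2 * n + 1);
    for (int i = 0; i < 2 * n + 1; i++)
        y[i] = h(sigma_points[i]);

    /* Weighted statistics of the predicted measurement. */
    y_mean = Eigen::VectorXd::Zero(y[0].size());
    for (int i = 0; i < 2 * n + 1; i++)
        y_mean += w_mean(i) * y[i];

    y_cov = Eigen::MatrixXd::Zero(y[0].size(), y[0].size());
    for (int i = 0; i < 2 * n + 1; i++)
        y_cov += w_cov(i) * (y[i] - y_mean) * (y[i] - y_mean).transpose();
}
```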
12 changes: 2 additions & 10 deletions src/BayesFilters/src/WhiteNoiseAcceleration.cpp
@@ -6,6 +6,7 @@
*/

#include <BayesFilters/WhiteNoiseAcceleration.h>
#include <BayesFilters/utils.h>

#include <cmath>
#include <utility>
@@ -139,16 +140,7 @@

VectorXd WhiteNoiseAcceleration::getTransitionProbability(const Ref<const MatrixXd>& prev_states, const Ref<const MatrixXd>& cur_states)
{
VectorXd probabilities(prev_states.cols());
MatrixXd differences = cur_states - prev_states;

std::size_t size = differences.rows();
for (std::size_t i = 0; i < prev_states.cols(); i++)
{
probabilities(i) = (-0.5 * static_cast<double>(size) * log(2.0 * M_PI) + -0.5 * log(Q_.determinant()) -0.5 * (differences.col(i).transpose() * Q_.inverse() * differences.col(i)).array()).exp().coeff(0);
}

return probabilities;
return utils::multivariate_gaussian_density(cur_states - prev_states, VectorXd::Zero(prev_states.rows()), Q_);
}


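For context, Q_ above is the process noise covariance of the white noise acceleration (constant velocity) model. A sketch of the standard discrete-time matrices for a planar target is given below; the state ordering [x, y, vx, vy], the parameter names and the helper function itself are illustrative assumptions and may differ from the conventions used by bfl::WhiteNoiseAcceleration.

```cpp
#include <Eigen/Dense>

/* Standard discrete-time white noise acceleration (constant velocity) model for a
   planar target with state [x, y, vx, vy], sampling interval T and acceleration
   noise intensity sigma2. */
void make_wna_matrices(double T, double sigma2, Eigen::MatrixXd& F, Eigen::MatrixXd& Q)
{
    const double T2 = T * T;
    const double T3 = T2 * T;

    /* State transition matrix: positions propagated with constant velocity. */
    F.resize(4, 4);
    F << 1.0, 0.0,   T, 0.0,
         0.0, 1.0, 0.0,   T,
         0.0, 0.0, 1.0, 0.0,
         0.0, 0.0, 0.0, 1.0;

    /* Process noise covariance of the white noise acceleration model. */
    Q.resize(4, 4);
    Q << T3 / 3.0,      0.0, T2 / 2.0,      0.0,
              0.0, T3 / 3.0,      0.0, T2 / 2.0,
         T2 / 2.0,      0.0,        T,      0.0,
              0.0, T2 / 2.0,      0.0,        T;
    Q *= sigma2;
}
```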