Skip to content
Merged

Dev #23

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
190 changes: 156 additions & 34 deletions include/controlpp/Estimators.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ namespace controlpp
*/
template<class T, int XRows, int XCols, int XOpt, int XMaxRows, int XMaxCols>
requires((XRows >= XCols) || (XRows == Eigen::Dynamic) || (XCols == Eigen::Dynamic))
Eigen::Vector<T, XCols> least_squares(const Eigen::Matrix<T, XRows, XCols, XOpt, XMaxRows, XMaxCols>& X, const Eigen::Vector<T, XRows>& y) noexcept {
Eigen::Vector<T, XCols> least_squares(const Eigen::Matrix<T, XRows, XCols, XOpt, XMaxRows, XMaxCols>& X, const Eigen::Vector<T, XRows>& y) {
Eigen::Vector<T, XCols> result = X.colPivHouseholderQr().solve(y).eval();
return result;
}
Expand All @@ -45,20 +45,26 @@ namespace controlpp
*
* ---
*
* Note that for a number of outputs greater than one (`NOutputs>1`)
* For a number of outputs greater than one (`NOutputs>1`)
* the model uses the 'Multi-Output System with shared parameter vector'.
*
* This allows using multiple sensors that observe the same
* system states and parameters to improve the estimation result.
*
* There is a default regularisation term (default: 1e-9) added to the diagonal elements of the covariance update.
*
* There is gain clamping (default: [-10, +10]) applied element-wise to the update gain K.
*/
template<class T, size_t NParams, size_t NMeasurements = 1>
class ReccursiveLeastSquares{
private:

Eigen::Matrix<T, NParams, NParams> _cov; ///< previous covariance
Eigen::Vector<T, NParams> _param; ///< previous parameter estimate
double _memory = 0.98;
Eigen::Matrix<T, NParams, NParams> _cov; ///< previous covariance
Eigen::Vector<T, NParams> _param; ///< previous parameter estimate
Eigen::Vector<T, NParams> _K;
T _memory = 0.98;
T _cov_regularisation = 1e-9;
T _gain_clamp = 10;


public:

Expand All @@ -80,21 +86,18 @@ namespace controlpp
* - `memory` < 1: forgetting older values with an exponential decay
* Often used values are between 0.8 and 0.98
*
* \param cov_reg A value that will be added to the diagonal of the covariance matrix at each update
* to prevent the covariance from becoming too small, ill-formed and irregular. This is mainly to increase numerical stability.
*/
inline ReccursiveLeastSquares(
const Eigen::Vector<T, NParams>& param_hint = Eigen::Vector<T, NParams>().setOne(),
const Eigen::Matrix<T, NParams, NParams>& cov_hint = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000)),
double memory = 0.95
)
: _cov(cov_hint)
, _param(param_hint)
T memory = 0.99)
: _param(param_hint)
, _memory(memory)
{
if(memory <= T(0) || memory > T(1)){
throw std::invalid_argument("Error: ReccursiveLeastSquares::ReccursiveLeastSquares(): memory has to be in the open-closed range of: (0, 1]");
}
this->_cov = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000));
this->_K.setZero();
}

/**
Expand All @@ -103,30 +106,37 @@ namespace controlpp
* \param s The known system inputs
* \returns The new parameter state estimate
*/
void add(const T& y, const Eigen::Matrix<T, NMeasurements, NParams>& s) noexcept {
const Eigen::Matrix<T, NMeasurements, NMeasurements> I = Eigen::Matrix<T, NMeasurements, NMeasurements>::Identity();
void add(const T& y, const Eigen::Matrix<T, NMeasurements, NParams>& s) {
this->_cov.diagonal().array() += this->_cov_regularisation;

// Gain
const auto A = (this->_cov * s.transpose()).eval();
const auto B = (this->_memory * I + s * this->_cov * s.transpose()).eval();
const Eigen::Matrix<T, NMeasurements, NMeasurements> I_m = Eigen::Matrix<T, NMeasurements, NMeasurements>::Identity();
auto B1 = (s * this->_cov * s.transpose()).eval();
B1.diagonal().array() += this->_memory;
const auto B = ((B1 + B1.transpose())/2).eval(); // force symetry

// calculate: K = A * B^-1
const Eigen::Vector<T, NParams> K = B.transpose().llt().solve(A.transpose()).transpose().eval();
this->_K = B.transpose().llt().solve(A.transpose()).transpose().eval();

// Limit the update gain
for(int i = 0; i < this->_K.size(); ++i){
this->_K.at(i) = std::clamp(this->_K.at(i), -this->_gain_clamp, this->_gain_clamp);
}

// Update
const Eigen::Vector<T, NParams> new_param = (this->_param + K * (y - s * this->_param)).eval();
this->_param = new_param;
this->_param += this->_K * (y - s * this->_param);

// Covariance
const Eigen::Matrix<T, NParams, NParams> new_cov = (this->_cov - K * s * this->_cov).eval() / this->_memory;
this->_cov = new_cov;
this->_cov -= this->_K * s * this->_cov;
this->_cov /= this->_memory;
}

/**
* \brief returns the current best estimate
* \returns the parameter vector
*/
inline const Eigen::Vector<T, NParams>& estimate() const noexcept {return this->_param;}
inline const Eigen::Vector<T, NParams>& estimate() const {return this->_param;}

/**
* \brief returns the current covariance
Expand All @@ -135,7 +145,70 @@ namespace controlpp
*
* \returns the current covariance matrix
*/
inline const Eigen::Matrix<T, NParams, NParams>& covariance() const noexcept {return this->_cov;}
[[nodiscard]] inline const Eigen::Matrix<T, NParams, NParams>& cov() const {return this->_cov;}

/**
 * \brief Overwrites the internal parameter covariance matrix
 *
 * Useful to re-initialise the estimator or to inject prior knowledge
 * about the parameter uncertainty.
 *
 * \param cov The new covariance matrix
 */
inline void set_cov(const Eigen::Matrix<T, NParams, NParams>& cov) {
    _cov = cov;
}

/**
* @brief Sets the memory factor
*
* The memory factor [0, 1], where:
* - larger values: Old parameters and inputs are remembered for longer (slower changes, more robust to noise).
* - smaller values: Old parameters are forgotten more quickly (faster changes)
*
* @param memory The new memory factor
*/
inline void set_memory(const T& memory){
this->_memory = memory;
}

/**
 * @brief Returns the memory (forgetting) factor
 *
 * The memory factor is in (0, 1], where:
 * - larger values: Old parameters and inputs are remembered for longer (slower changes, more robust to noise).
 * - smaller values: Old parameters are forgotten more quickly (faster changes)
 *
 * @returns The current memory factor
 */
[[nodiscard]] inline const T& memory() const {
    return _memory;
}

/**
* \brief Limits the update gain K.
*
* Limits the update gain K from -gain_clamp to +gain_clamp.
* Prevents too fast updates and ill conditioned updates. For example from a lack of excitation variety
*
* default is 10
*
* \param gain_clamp The new gain clamp
*/
inline void set_gain_clamp(const T& gain_clamp) {
this->_gain_clamp = gain_clamp;
}

/**
 * @brief Returns the current gain clamp factor
 *
 * The update gain K is clamped element-wise to [-gain_clamp, +gain_clamp].
 *
 * @return The current gain clamp factor
 */
[[nodiscard]] inline const T& gain_clamp() const {
    // const-qualified for consistency with the single-measurement
    // specialisation of this class, so it is callable on const objects.
    return this->_gain_clamp;
}



/**
 * @brief Returns the gain K used in the parameter update
 *
 * The gain K can be seen as a measure of uncertainty.
 *
 * @return The update gain vector
 */
inline const Eigen::Vector<T, NParams>& gain() const {
    return _K;
}
};

/**
Expand Down Expand Up @@ -174,7 +247,11 @@ namespace controlpp

Eigen::Matrix<T, NParams, NParams> _cov; ///< previous covariance
Eigen::Vector<T, NParams> _param; ///< previous parameter estimate
double _memory = 0.98;
Eigen::Vector<T, NParams> _K;
T _memory = 0.98;
T _cov_regularisation = 1e-9;
T _gain_clamp = 10;


public:

Expand All @@ -196,20 +273,25 @@ namespace controlpp
* - `memory` < 1: forgetting older values with an exponential decay
* Often used values are between `0.9 `and `0.995`.
*
* \param cov_reg A value that will be added to the diagonal of the covariance matrix at each update
* \param cov_regularisation A value that will be added to the diagonal of the covariance matrix before each update
* to prevent the covariance from becoming too small, ill-formed and irregular. This is mainly to increase numerical stability.
*
*/
inline ReccursiveLeastSquares(
const Eigen::Vector<T, NParams>& param_hint = Eigen::Vector<T, NParams>().setOnes(),
const Eigen::Matrix<T, NParams, NParams>& cov_hint = (Eigen::Matrix<T, NParams, NParams>::Identity() * T(1000)),
double memory = 0.99)
T memory = 0.99,
T cov_regularisation = 1e-9
)
: _cov(cov_hint)
, _param(param_hint)
, _memory(memory)
, _cov_regularisation(cov_regularisation)
{
if(memory <= T(0) || memory > T(1)){
throw std::invalid_argument("Error: ReccursiveLeastSquares::ReccursiveLeastSquares(): memory has to be in the open-closed range of: (0, 1]");
}
this->_K.setZero();
}

inline void set_cov(const Eigen::Matrix<T, NParams, NParams>& cov){
Expand All @@ -224,29 +306,44 @@ namespace controlpp
this->_memory = memory;
}

/**
 * \brief Sets the limit for the update gain K.
 *
 * The update gain K is clamped element-wise to [-gain_clamp, +gain_clamp]
 * in add(). Prevents too fast and ill-conditioned updates. Default is 10.
 *
 * \param gain_clamp The new gain clamp
 */
inline void set_gain_clamp(const T& gain_clamp){
    this->_gain_clamp = gain_clamp;
}

/**
 * @brief Returns the current gain clamp factor
 *
 * The update gain K is clamped element-wise to [-gain_clamp, +gain_clamp].
 *
 * @return The current gain clamp factor
 */
[[nodiscard]] inline const T& gain_clamp() const {
    return this->_gain_clamp;
}

/**
* \brief Adds a new input output pair that updates the estimate
* \param y The new system measurements/outputs
* \param s The known system inputs
* \returns The new parameter state estimate
*/
void add(const T& y, const Eigen::Vector<T, NParams>& s) noexcept {
void add(const T& y, const Eigen::Vector<T, NParams>& s) {
this->_cov.diagonal().array() += this->_cov_regularisation;

// Gain
const Eigen::Vector<T, NParams> K = ((this->_cov * s) / (this->_memory + s.transpose() * this->_cov * s)).eval();
const auto A = (this->_cov * s).eval();
const T B = this->_memory + s.transpose() * this->_cov * s;
this->_K = A / B;

for(int i = 0; i < this->_K.size(); ++i){
this->_K(i) = std::clamp(this->_K(i), -this->_gain_clamp, this->_gain_clamp);
}

// Update
const Eigen::Vector<T, NParams> new_param = (this->_param + K * (y - s.transpose() * this->_param)).eval();
this->_param = new_param;
this->_param += this->_K * (y - s.transpose() * this->_param);

const Eigen::Matrix<T, NParams, NParams> new_cov = ((this->_cov - K * s.transpose() * this->_cov) / this->_memory).eval();
this->_cov = new_cov;
this->_cov -= this->_K * s.transpose() * this->_cov;
this->_cov /= this->_memory;
}

/**
* \brief returns the current best estimate
* \returns the parameter vector
*/
inline const Eigen::Vector<T, NParams>& estimate() const noexcept {return this->_param;}
inline const Eigen::Vector<T, NParams>& estimate() const {return this->_param;}

/**
* \brief returns the current covariance
Expand All @@ -255,7 +352,16 @@ namespace controlpp
*
* \returns the current covariance matrix
*/
inline const Eigen::Matrix<T, NParams, NParams>& cov() const noexcept {return this->_cov;}
inline const Eigen::Matrix<T, NParams, NParams>& cov() const {return this->_cov;}

/**
 * @brief Returns the gain used for the update
 *
 * The gain can be seen as a measure of uncertainty.
 *
 * @return The gain used in the parameter update
 */
inline const Eigen::Vector<T, NParams>& gain() const {return this->_K;}
};

/**
Expand Down Expand Up @@ -369,6 +475,22 @@ namespace controlpp
return result;
}

/**
 * @brief Returns the parameter covariance of the underlying recursive-least-squares estimator
 *
 * @return The current covariance matrix (numerator and denominator parameters combined)
 */
[[nodiscard]] Eigen::Matrix<T, NumOrder+1 + DenOrder, NumOrder+1 + DenOrder> cov() const {
    // Returning a non-const value (instead of a const prvalue) keeps the
    // result movable; the method is const-qualified since it does not mutate.
    return this->rls.cov();
}

/**
 * @brief Returns the update gain of the underlying recursive-least-squares estimator
 *
 * @return The gain vector used in the last parameter update
 */
const Eigen::Vector<T, NumOrder+1 + DenOrder>& gain(){
    return this->rls.gain();
}

/**
 * @brief Sets the gain clamp of the underlying recursive-least-squares estimator
 *
 * \param g The new gain clamp (the update gain is limited to [-g, +g])
 */
void set_gain_clamp(const T& g){
    this->rls.set_gain_clamp(g);
}

/**
 * @brief Returns the gain clamp of the underlying recursive-least-squares estimator
 *
 * @return The current gain clamp factor
 */
[[nodiscard]] const T& gain_clamp() const {
    return this->rls.gain_clamp();
}

};

} // namespace controlpp
7 changes: 4 additions & 3 deletions include/controlpp/KalmanFilter.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -259,9 +259,10 @@ namespace controlpp
// > note S is already symetric, so no need to transpose
//
// K = (S.inverse() * U.transpose()).transpose()
const auto U = P_p * _H.transpose();
const auto S = _H * P_p * _H.transpose() + _R;
const Eigen::Matrix<T, NStates, NMeasurements> K = (S.ldlt().solve(U.transpose())).transpose();
const auto U = (P_p * _H.transpose()).eval();
const auto S1 = _H * P_p * _H.transpose() + _R;
const auto S = ((S1 + S1.transpose()) / 2).eval();
const Eigen::Matrix<T, NStates, NMeasurements> K = (S.llt().solve(U.transpose())).transpose();

_x = x_p + K * (z - _H * x_p);

Expand Down
4 changes: 2 additions & 2 deletions include/controlpp/TransferFunction.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -203,12 +203,12 @@ namespace controlpp

void print(std::ostream& stream, std::string_view var="x") const {
stream << "num: "; this->num().print(stream, var);
stream << "\nden: "; this->den().print(stream, var); stream << '\n';
stream << "\nden: "; this->den().print(stream, var);
}

void print(std::ostream& stream, std::function<std::string(int i)> var) const {
stream << "num: "; this->num().print(stream, var);
stream << "\nden: "; this->den().print(stream, var); stream << '\n';
stream << "\nden: "; this->den().print(stream, var);
}

friend std::ostream& operator<<(std::ostream& stream, const TransferFunction& rpoly){
Expand Down
10 changes: 9 additions & 1 deletion tests/Estimators_Tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,13 @@ TEST(Estimators, DtfEstimator){
const double u = (i == 0) ? 0.0 : 1.0;
const double y = dssf.input(u);
dtf_est.add(y, u);
// std::cout << "iteration: " << i << std::endl;
// std::cout << "- estimage:" << std::endl;
// std::cout << dtf_est.estimate() << std::endl;
// std::cout << "- covariance:" << std::endl;
// std::cout << dtf_est.cov() << std::endl;
// std::cout << "- gain:" << std::endl;
// std::cout << dtf_est.gain().transpose() << '\n' << std::endl;
}

const auto Gz_est = dtf_est.estimate();
Expand All @@ -96,7 +103,8 @@ TEST(Estimators, DtfEstimator){
const double u = 1.0;
const double y = dssf.input(u);
const double y_est = dssf_est.input(u);
ASSERT_NEAR(y, y_est, 0.01);
// std::cout << "iteration: " << i << ", y: " << y << ", y_est: " << y_est << std::endl;
ASSERT_NEAR(y, y_est, 0.05);
}

}