diff --git a/stan/math/prim/fun/matrix_exp_action_handler.hpp b/stan/math/prim/fun/matrix_exp_action_handler.hpp
index 9c7bcfc1230..92bb8117671 100644
--- a/stan/math/prim/fun/matrix_exp_action_handler.hpp
+++ b/stan/math/prim/fun/matrix_exp_action_handler.hpp
@@ -3,6 +3,8 @@
 #include
 #include
+#include
+#include
 #include
 #include
@@ -14,26 +16,42 @@ namespace math {
  * "Computing the Action of the Matrix Exponential,
  * with an Application to Exponential Integrators"
  * Read More: https://epubs.siam.org/doi/abs/10.1137/100788860
+ * See also:
+ * https://www.mathworks.com/matlabcentral/fileexchange/29576-matrix-exponential-times-a-vector
  *
  * Calculates exp(mat*t)*b, where mat & b are matrices,
  * and t is double.
  */
 class matrix_exp_action_handler {
-  static constexpr int p_max = 8;
-  static constexpr int m_max = 55;
-  static constexpr double tol = 1.1e-16;
-
-  // table 3.1 in the reference
-  const std::vector<double> theta_m_single_precision{
-      1.3e-1, 1.0e0, 2.2e0, 3.6e0, 4.9e0, 6.3e0,
-      7.7e0,  9.1e0, 1.1e1, 1.2e1, 1.3e1};
-  const std::vector<double> theta_m_double_precision{
-      2.4e-3, 1.4e-1, 6.4e-1, 1.4e0, 2.4e0, 3.5e0,
-      4.7e0,  6.0e0,  7.2e0,  8.5e0, 9.9e0};
-
-  double l1norm(const Eigen::MatrixXd& m) {
-    return m.colwise().lpNorm<1>().maxCoeff();
-  }
+  const int _p_max = 8;
+  const int _m_max = 55;
+  const double _tol = 1.1e-16;  // from the paper, double precision: 2^-53
+  const std::vector<double> _theta_m{
+      2.22044605e-16, 2.58095680e-08, 1.38634787e-05, 3.39716884e-04,
+      2.40087636e-03, 9.06565641e-03, 2.38445553e-02, 4.99122887e-02,
+      8.95776020e-02, 1.44182976e-01, 2.14235807e-01, 2.99615891e-01,
+      3.99777534e-01, 5.13914694e-01, 6.41083523e-01, 7.80287426e-01,
+      9.30532846e-01, 1.09086372e+00, 1.26038106e+00, 1.43825260e+00,
+      1.62371595e+00, 1.81607782e+00, 2.01471078e+00, 2.21904887e+00,
+      2.42858252e+00, 2.64285346e+00, 2.86144963e+00, 3.08400054e+00,
+      3.31017284e+00, 3.53966635e+00, 3.77221050e+00, 4.00756109e+00,
+      4.24549744e+00, 4.48581986e+00, 4.72834735e+00, 4.97291563e+00,
+      5.21937537e+00, 5.46759063e+00, 5.71743745e+00, 5.96880263e+00,
+      6.22158266e+00, 6.47568274e+00, 6.73101590e+00, 6.98750228e+00,
+      7.24506843e+00, 7.50364669e+00, 7.76317466e+00, 8.02359473e+00,
+      8.28485363e+00, 8.54690205e+00, 8.80969427e+00, 9.07318789e+00,
+      9.33734351e+00, 9.60212447e+00, 9.86749668e+00, 1.01334283e+01,
+      1.03998897e+01, 1.06668532e+01, 1.09342929e+01, 1.12021845e+01,
+      1.14705053e+01, 1.17392341e+01, 1.20083509e+01, 1.22778370e+01,
+      1.25476748e+01, 1.28178476e+01, 1.30883399e+01, 1.33591369e+01,
+      1.36302250e+01, 1.39015909e+01, 1.41732223e+01, 1.44451076e+01,
+      1.47172357e+01, 1.49895963e+01, 1.52621795e+01, 1.55349758e+01,
+      1.58079765e+01, 1.60811732e+01, 1.63545578e+01, 1.66281227e+01,
+      1.69018609e+01, 1.71757655e+01, 1.74498298e+01, 1.77240478e+01,
+      1.79984136e+01, 1.82729215e+01, 1.85475662e+01, 1.88223426e+01,
+      1.90972458e+01, 1.93722711e+01, 1.96474142e+01, 1.99226707e+01,
+      2.01980367e+01, 2.04735082e+01, 2.07490816e+01, 2.10247533e+01,
+      2.13005199e+01, 2.15763782e+01, 2.18523250e+01, 2.21283574e+01};
 
  public:
   /**
@@ -50,43 +68,73 @@ class matrix_exp_action_handler {
                                 const double& t = 1.0) {
     Eigen::MatrixXd A = mat;
     double mu = A.trace() / A.rows();
-    for (int i = 0; i < A.rows(); ++i) {
-      A(i, i) -= mu;
-    }
+    A.diagonal().array() -= mu;
+
+    int m, s;
+    set_approx_order(A, b, t, m, s);
+
+    double eta = exp(t * mu / s);
+
+    const auto& b_eval = b.eval();
+    Eigen::MatrixXd f = b_eval;
+    Eigen::MatrixXd bi = b_eval;
 
-    int m{0}, s{0};
-    set_approximation_parameter(A, t, m, s);
-
-    Eigen::MatrixXd res(A.rows(), b.cols());
-
-    for (int col = 0; col < b.cols(); ++col) {
-      bool conv = false;
-      Eigen::VectorXd B = b.col(col);
-      Eigen::VectorXd F = B;
-      const auto eta = std::exp(t * mu / s);
-      for (int i = 1; i < s + 1; ++i) {
-        auto c1 = B.template lpNorm<Eigen::Infinity>();
-        if (m > 0) {
-          for (int j = 1; j < m + 1; ++j) {
-            B = t * A * B / (s * j);
-            auto c2 = B.template lpNorm<Eigen::Infinity>();
-            F += B;
-            if (c1 + c2 < tol * F.template lpNorm<Eigen::Infinity>()) {
-              conv = true;
-              break;
-            }
-            c1 = c2;
-          }
-        }
-        F *= eta;
-        B = F;
-        if (conv) {
+    for (int i = 0; i < s; ++i) {
+      // maximum absolute row sum, aka inf-norm of matrix operator
+      double c1 = matrix_operator_inf_norm(bi);
+      for (int j = 0; j < m; ++j) {
+        bi = (t / (s * (j + 1))) * (A * bi);
+        f += bi;
+        double c2 = matrix_operator_inf_norm(bi);
+        if (c1 + c2 < _tol * matrix_operator_inf_norm(f))
           break;
-        }
+        c1 = c2;
+      }
+      f *= eta;
+      bi = f;
+    }
+
+    return f;
+  }
+
+  /**
+   * Eigen expression for matrix operator infinity norm
+   *
+   * @param mat matrix
+   */
+  double matrix_operator_inf_norm(Eigen::MatrixXd const& x) {
+    return x.cwiseAbs().rowwise().sum().maxCoeff();
+  }
+
+  /**
+   * Estimate the 1-norm of mat^m
+   *
+   * See A. H. Al-Mohy and N. J. Higham, A New Scaling and Squaring
+   * Algorithm for the Matrix Exponential, SIAM J. Matrix Anal. Appl. 31(3):
+   * 970-989, 2009.
+   *
+   * For positive matrices the result is exact. Otherwise it falls
+   * back to Eigen's norm, which is only
+   * efficient for small & medium-size matrices (n < 100). Large size
+   * matrices require a more efficient 1-norm approximation
+   * algorithm such as normest1. See, e.g.,
+   * https://hg.savannah.gnu.org/hgweb/octave/file/e35866e6a2e0/scripts/linear-algebra/normest1.m
+   *
+   * @param mat matrix
+   * @param m power
+   */
+  template <typename EigMat1, require_all_eigen_t<EigMat1>* = nullptr,
+            require_all_st_same<double, EigMat1>* = nullptr>
+  double mat_power_1_norm(const EigMat1& mat, int m) {
+    if ((mat.array() > 0.0).all()) {
+      Eigen::VectorXd e = Eigen::VectorXd::Constant(mat.rows(), 1.0);
+      for (int j = 0; j < m; ++j) {
+        e = mat.transpose() * e;
       }
-      res.col(col) = F;
-    }  // loop b columns
-    return res;
+      return e.lpNorm<Eigen::Infinity>();
+    } else {
+      return mat.pow(m).cwiseAbs().colwise().sum().maxCoeff();
+    }
   }
 
   /**
@@ -100,27 +148,64 @@ class matrix_exp_action_handler {
    * (paragraph between eq. 3.11 & eq. 3.12).
    *
    * @param [in] mat matrix A
+   * @param [in] b matrix B
    * @param [in] t double t in exp(A*t)*B
-   * @param [out] m int parameter m
-   * @param [out] s int parameter s
+   * @param [out] m int parameter m; m >= 0, m < _m_max
+   * @param [out] s int parameter s; s >= 1
    */
-  inline void set_approximation_parameter(const Eigen::MatrixXd& mat,
-                                          const double& t, int& m, int& s) {
-    if (l1norm(mat) < tol || t < tol) {
+  template <typename EigMat1, typename EigMat2,
+            require_all_eigen_t<EigMat1, EigMat2>* = nullptr,
+            require_all_st_same<double, EigMat1, EigMat2>* = nullptr>
+  inline void set_approx_order(const EigMat1& mat, const EigMat2& b,
+                               const double& t, int& m, int& s) {
+    if (t < _tol) {
+      m = 0;
+      s = 1;
+      return;
+    }
+
+    // L1 norm
+    double normA = mat.colwise().template lpNorm<1>().maxCoeff();
+    if (normA < _tol) {
       m = 0;
       s = 1;
+      return;
+    }
+
+    Eigen::VectorXd alpha(_p_max - 1);
+    if (normA * (_m_max * b.cols())
+        < 4.0 * _theta_m[_m_max] * _p_max * (_p_max + 3)) {
+      alpha = Eigen::VectorXd::Constant(_p_max - 1, normA);
     } else {
-      m = m_max;
+      Eigen::VectorXd eta(_p_max);
+      for (int p = 0; p < _p_max; ++p) {
+        eta[p] = std::pow(mat_power_1_norm(mat, p + 2), 1.0 / (p + 2));
+      }
+      for (int p = 0; p < _p_max - 1; ++p) {
+        alpha[p] = std::max(eta[p], eta[p + 1]);
+      }
+    }
 
-      Eigen::MatrixXd a = mat * t;
-      const double theta_m = theta_m_double_precision.back();
-      for (auto i = 0; i < std::ceil(std::log2(p_max)); ++i) {
-        a *= a;
+    Eigen::MatrixXd mt = Eigen::MatrixXd::Zero(_p_max - 1, _m_max);
+    for (int p = 1; p < _p_max; ++p) {
+      for (int i = p * (p + 1) - 2; i < _m_max; ++i) {
+        mt(p - 1, i) = alpha[p - 1] / _theta_m[i];
       }
-      double ap = std::pow(l1norm(a), 1.0 / p_max);
-      int c = std::ceil(ap / theta_m);
-      s = (c < 1 ? 1 : c);
     }
+
+    Eigen::VectorXd u = Eigen::VectorXd::LinSpaced(_m_max, 1, _m_max);
+    Eigen::MatrixXd c = stan::math::ceil(mt) * u.asDiagonal();
+    for (Eigen::MatrixXd::Index i = 0; i < c.size(); ++i) {
+      if (c(i) <= 1.e-16) {
+        c(i) = std::numeric_limits<double>::infinity();
+      }
+    }
+    int cost = c.colwise().minCoeff().minCoeff(&m);
+    if (std::isinf(cost)) {
+      s = 1;
+      return;
+    }
+    s = std::max(cost / m, 1);
   }
 };
diff --git a/test/unit/math/prim/fun/matrix_exp_action_handler_test.cpp b/test/unit/math/prim/fun/matrix_exp_action_handler_test.cpp
index c240df6bc54..78e0ed48f2f 100644
--- a/test/unit/math/prim/fun/matrix_exp_action_handler_test.cpp
+++ b/test/unit/math/prim/fun/matrix_exp_action_handler_test.cpp
@@ -68,14 +68,6 @@ TEST(MathMatrixRevMat, matrix_exp_action_vector) {
     for (size_t i = 0; i < n; ++i) {
       EXPECT_NEAR(res(i), expb(i), 1.e-6);
     }
-
-    int m1, s1, m2, s2;
-    const double t1 = 9.9, t2 = 1.0;
-    handler.set_approximation_parameter(A, t1, m1, s1);
-    A *= t1;
-    handler.set_approximation_parameter(A, t2, m2, s2);
-    EXPECT_EQ(m1, m2);
-    EXPECT_EQ(s1, s2);
   }
 }
diff --git a/test/unit/math/prim/fun/matrix_exp_multiply_test.cpp b/test/unit/math/prim/fun/matrix_exp_multiply_test.cpp
index 27772f4e348..b81df9f10a2 100644
--- a/test/unit/math/prim/fun/matrix_exp_multiply_test.cpp
+++ b/test/unit/math/prim/fun/matrix_exp_multiply_test.cpp
@@ -1,4 +1,5 @@
 #include
+#include
 #include
 #include
 #include
@@ -21,23 +22,36 @@ inline void test_matrix_exp_multiply() {
   // matrix_exp_multiply
   Eigen::Matrix<double, N, M> res = matrix_exp_multiply(A, B);
-  EXPECT_EQ(res.size(), expAB.size());
-  for (int l = 0; l < res.size(); ++l) {
-    EXPECT_FLOAT_EQ(res(l), expAB(l));
-  }
+
+  EXPECT_MATRIX_FLOAT_EQ(expAB, res);
 }
 
-TEST(MathMatrixPrimMat, matrix_exp_multiply) {
-  // the helper above doesn't handle 0 size inputs
-  Eigen::MatrixXd A(0, 0);
-  Eigen::MatrixXd B(0, 0);
-  EXPECT_EQ(stan::math::matrix_exp_multiply(A, B).size(), 0);
 
+TEST(MathMatrixPrimMat, matrix_exp_multiply_power_1_norm_fun) {
+  stan::math::matrix_exp_action_handler maexp;
+  Eigen::MatrixXd x(2, 2);
+  x << 1, -2, -3, 4;
+  EXPECT_FLOAT_EQ(maexp.mat_power_1_norm(x, 2), 32.0);
+  EXPECT_FLOAT_EQ(maexp.mat_power_1_norm(x, 3), 172.0);
+
+  Eigen::MatrixXd y(3, 3);
+  y << 1, 2, 3, 4, 5, 6, 7, 1, 2;
+  EXPECT_FLOAT_EQ(maexp.mat_power_1_norm(y, 3), 1163.0);
+  EXPECT_FLOAT_EQ(maexp.mat_power_1_norm(y, 12), 8.3805595e+11);
+}
 
-  Eigen::MatrixXd C(0, 2);
-  Eigen::MatrixXd M = stan::math::matrix_exp_multiply(A, C);
-  EXPECT_EQ(A.rows(), M.rows());
-  EXPECT_EQ(C.cols(), M.cols());
+TEST(MathMatrixPrimMat, matrix_exp_multiply_approx_order) {
+  stan::math::matrix_exp_action_handler maexp;
+  Eigen::MatrixXd x(2, 2);
+  x << 1, -2, -3, 4;
+  Eigen::VectorXd b(2);
+  b << 1.5, 2.5;
+  int m, s;
+  maexp.set_approx_order(x, b, 1.0, m, s);
+  EXPECT_EQ(m, 40);
+  EXPECT_EQ(s, 1);
+}
 
+TEST(MathMatrixPrimMat, matrix_exp_multiply) {
   test_matrix_exp_multiply<1, 1>();
   test_matrix_exp_multiply<1, 5>();
   test_matrix_exp_multiply<5, 1>();
@@ -45,6 +59,93 @@ TEST(MathMatrixPrimMat, matrix_exp_multiply) {
   test_matrix_exp_multiply<20, 2>();
 }
 
+TEST(MathMatrixPrimMat, matrix_exp_multiply_issue_2529) {
+  // issue #2529 https://github.com/stan-dev/math/issues/2529
+  Eigen::MatrixXd a(3, 3);
+  a << -1.2, 1.2, 0, 0, -0.5, 0.5, 0, 0, -0.3;
+  Eigen::VectorXd b(3);
+  b << 1.0, 1.0, 1.0;
+  for (auto i = 15; i < 40; ++i) {
+    double t = i;
+    Eigen::MatrixXd m1 = stan::math::matrix_exp_multiply(a * t, b);
+    Eigen::MatrixXd m2 = stan::math::matrix_exp(a * t) * b;
+    EXPECT_MATRIX_FLOAT_EQ(m1, m2);
+  }
+}
+
+TEST(MathMatrixPrimMat, matrix_exp_multiply_poisson_5) {
+  // Block tridiag mat by discretizing Poisson's Eq. on a 5x5 grid
+  int n = 5;
+  int nd = n * n;
+  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(nd, nd);
+  Eigen::VectorXd b(nd);
+  for (auto i = 0; i < nd; ++i) {
+    b(i) = -1.0 + i * 2.0 / (nd - 1);
+    m(i, i) = 4.0;
+    if (i + 1 < nd) {
+      m(i, i + 1) = -1.0;
+      m(i + 1, i) = -1.0;
+    }
+    if (i + 5 < nd) {
+      m(i, std::min(i + 5, nd - 1)) = -1.0;
+      m(std::min(i + 5, nd - 1), i) = -1.0;
+    }
+  }
+
+  double t = 1.0;
+  Eigen::MatrixXd p1 = stan::math::matrix_exp_multiply(m * t, b);
+  Eigen::MatrixXd p2 = stan::math::matrix_exp(m * t) * b;
+  EXPECT_MATRIX_NEAR(p1, p2, 1.e-12);
+}
+
+TEST(MathMatrixPrimMat, matrix_exp_multiply_matlab) {
+  // https://blogs.mathworks.com/cleve/2012/07/23/a-balancing-act-for-the-matrix-exponential/#7401fe8a-5a7d-40df-92d7-1ae34f45adf2
+  // // NOLINT
+  {
+    Eigen::MatrixXd m(3, 3);
+    m << 0, 1e-08, 0, -20066666666.6667, -3, 20000000000, 66.6666666666667, 0,
+        -66.6666666666667;
+    Eigen::MatrixXd expm(3, 3);
+    expm << 0.446849468283175, 1.54044157383952e-09, 0.462811453558774,
+        -5743067.77947947, -0.0152830038686819, -4526542.71278401,
+        0.447722977849494, 1.54270484519591e-09, 0.463480648837651;
+    std::vector<int> r{-1, 0, 1};
+    Eigen::VectorXd p1(3), p2(3);
+    for (auto i : r) {
+      for (auto j : r) {
+        for (auto k : r) {
+          Eigen::VectorXd b(3);
+          b << i, j, k;
+          p1 = stan::math::matrix_exp_multiply(m, b);
+          p2 = expm * b;
+          EXPECT_MATRIX_NEAR(p1, p2, 5.e-6);
+        }
+      }
+    }
+  }
+
+  // https://discourse.mc-stan.org/t/documentation-for-the-action-of-the-matrix-exponential/25498/8?u=yizhang
+  // // NOLINT
+  {
+    Eigen::MatrixXd m(2, 2);
+    m << -49, 24, -64, 31;
+    Eigen::MatrixXd expm(2, 2);
+    expm << -0.735758758144755, 0.551819099658099, -1.47151759908826,
+        1.10363824071558;
+    std::vector<int> r{0, 1, 10, 100, 1000, 10000};
+    Eigen::VectorXd p1(2), p2(2);
+    for (auto i : r) {
+      for (auto j : r) {
+        Eigen::VectorXd b(2);
+        b << i, j;
+        p1 = stan::math::matrix_exp_multiply(m, b);
+        p2 = expm * b;
+        EXPECT_MATRIX_NEAR(p1, p2, 1.e-8);
+      }
+    }
+  }
+}
+
 TEST(MathMatrixPrimMat, matrix_exp_multiply_exception) {
   using stan::math::matrix_exp_multiply;
   {  // multiplicable
diff --git a/test/unit/math/prim/fun/scale_matrix_exp_multiply_test.cpp b/test/unit/math/prim/fun/scale_matrix_exp_multiply_test.cpp
index 1dd1bf30a70..cfd75abb990 100644
--- a/test/unit/math/prim/fun/scale_matrix_exp_multiply_test.cpp
+++ b/test/unit/math/prim/fun/scale_matrix_exp_multiply_test.cpp
@@ -23,10 +23,7 @@ inline void test_scale_matrix_exp_multiply() {
   // matrix_exp_multiply
   const double t = 1.0;
   Eigen::Matrix<double, N, M> res = scale_matrix_exp_multiply(t, A, B);
-  EXPECT_EQ(res.size(), expAB.size());
-  for (int l = 0; l < res.size(); ++l) {
-    EXPECT_FLOAT_EQ(res(l), expAB(l));
-  }
+  EXPECT_MATRIX_FLOAT_EQ(expAB, res);
 }
 
 TEST(MathMatrixPrimMat, scale_matrix_exp_multiply) {
@@ -48,6 +45,76 @@ TEST(MathMatrixPrimMat, scale_matrix_exp_multiply) {
   test_scale_matrix_exp_multiply<20, 2>();
 }
 
+TEST(MathMatrixPrimMat, scale_matrix_exp_multiply_issue_1146) {
+  // matrix from bug report
+  // https://github.com/stan-dev/math/issues/1146
+  int m = 17;
+  Eigen::MatrixXd Ac(m, m);
+  Ac << -4.4758400, 0.0732692, 0.0000000, 0.000000, 0.000000, 0.0000000, 0.0000,
+      0.00000000, 0.000000, 0.0000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.00000000, 0.00000000, 0.00000000, 0.0183173, -0.0366346,
+      0.0183173, 0.000000, 0.000000, 0.0000000, 0.0000, 0.00000000, 0.000000,
+      0.0000000, 0.00000000, 0.0000000, 0.0000000, 0.0000000, 0.00000000,
+      0.00000000, 0.00000000, 0.0000000, 0.0146538, -0.1794070, 0.000000,
+      0.000000, 0.0268619, 0.0000, 0.00639568, 0.000000, 0.0639568, 0.00000000,
+      0.0000000, 0.0426379, 0.0249012, 0.00748033, 0.00498689, 0.00415574,
+      0.0000000, 0.0000000, 0.0000000, -5.868960, 0.447212, 0.0000000, 0.0000,
+      0.00000000, 0.000000, 0.0000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.00000000, 0.00000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.223606, -0.447212, 0.2236060, 0.0000, 0.00000000, 0.000000,
+      0.0000000, 0.00000000, 0.0000000, 0.0000000, 0.0000000, 0.00000000,
+      0.00000000, 0.00000000, 0.0000000, 0.0000000, 0.3287580, 0.000000,
+      0.447212, -2.3328900, 0.0000, 0.04931380, 0.000000, 0.4931380, 0.00000000,
+      0.0000000, 0.3287580, 0.6857140, 0.05767690, 0.03845130, 0.03204270,
+      0.0000000, 0.0000000, 0.0000000, 0.000000, 0.000000, 0.0000000, -42.1500,
+      11.43980000, 0.000000, 0.0000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.00000000, 0.00000000, 0.00000000, 0.0000000, 0.0000000,
+      2.3972000, 0.000000, 0.000000, 1.5102300, 11.4398, -26.34030000, 0.000000,
+      3.5957900, 0.00000000, 0.0000000, 2.3972000, 5.0000000, 0.42056100,
+      0.28037400, 0.23364500, 0.0000000, 0.0000000, 0.0000000, 0.000000,
+      0.000000, 0.0000000, 0.0000, 0.00000000, -0.122394, 0.1223940, 0.00000000,
+      0.0000000, 0.0000000, 0.0000000, 0.00000000, 0.00000000, 0.00000000,
+      0.0000000, 0.0000000, 1.0958600, 0.000000, 0.000000, 0.6903930, 0.0000,
+      0.16437900, 1.101550, -6.4337600, 0.00000000, 0.0000000, 1.0958600,
+      2.2857100, 0.19225600, 0.12817100, 0.10680900, 0.0000000, 0.0000000,
+      0.0000000, 0.000000, 0.000000, 0.0000000, 0.0000, 0.00000000, 0.000000,
+      0.0000000, -0.00471794, 0.0014969, 0.0000000, 0.0000000, 0.00000000,
+      0.00000000, 0.00000000, 0.0000000, 0.0000000, 0.0000000, 0.000000,
+      0.000000, 0.0000000, 0.0000, 0.00000000, 0.000000, 0.0000000, 0.01901160,
+      -0.0402984, 0.0212869, 0.0000000, 0.00000000, 0.00000000, 0.00000000,
+      0.0000000, 0.0000000, 0.0426379, 0.000000, 0.000000, 0.0268619, 0.0000,
+      0.00639568, 0.000000, 0.0639568, 0.00000000, 0.0170295, -0.3347470,
+      0.1778660, 0.00748033, 0.00498689, 0.00415574, 0.0000000, 0.0000000,
+      0.2695880, 0.000000, 0.000000, 0.6065730, 0.0000, 0.14442200, 0.000000,
+      1.4442200, 0.00000000, 0.0000000, 1.9256300, -4.3904300, 0.03851260,
+      0.15405000, 0.19256300, 0.0000000, 0.0000000, 0.0000000, 0.000000,
+      0.000000, 0.0000000, 0.0000, 0.00000000, 0.000000, 0.0000000, 0.00000000,
+      0.0000000, 0.0000000, 0.0000000, -1.00000000, 0.00000000, 0.00000000,
+      0.0000000, 0.0000000, 0.0000000, 0.000000, 0.000000, 0.0000000, 0.0000,
+      0.00000000, 0.000000, 0.0000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.00000000, -1.00000000, 0.00000000, 0.0000000, 0.0000000,
+      0.0000000, 0.000000, 0.000000, 0.0000000, 0.0000, 0.00000000, 0.000000,
+      0.0000000, 0.00000000, 0.0000000, 0.0000000, 0.0000000, 0.00000000,
+      0.00000000, -1.00000000;
+
+  Eigen::MatrixXd phi = Eigen::MatrixXd::Zero(2 * m, 2 * m);
+  Eigen::MatrixXd OI = Eigen::MatrixXd::Zero(2 * m, m);
+
+  OI.bottomRightCorner(m, m) = Eigen::VectorXd::Constant(m, 1.0).asDiagonal();
+  phi.topLeftCorner(m, m) = Ac;
+  phi.topRightCorner(m, m) = Eigen::VectorXd::Constant(m, 0.1).asDiagonal();
+  phi.bottomRightCorner(m, m) = -Ac.transpose();
+
+  Eigen::MatrixXd expAB(2 * m, m);
+  Eigen::MatrixXd res(2 * m, m);
+  for (auto i = 0; i < 20; ++i) {
+    double dt = 0.1 + i * 0.05;
+    expAB = stan::math::matrix_exp(phi * dt) * OI;
+    res = stan::math::scale_matrix_exp_multiply(dt, phi, OI);
+    EXPECT_MATRIX_FLOAT_EQ(expAB, res);
+  }
+}
+
 TEST(MathMatrixPrimMat, scale_matrix_exp_multiply_exception) {
   using stan::math::scale_matrix_exp_multiply;
   const double t = 1.0;