diff --git a/albatross/core/parameter_handling_mixin.h b/albatross/core/parameter_handling_mixin.h index f50dafa5..dba3b79b 100644 --- a/albatross/core/parameter_handling_mixin.h +++ b/albatross/core/parameter_handling_mixin.h @@ -36,6 +36,12 @@ using ParameterKey = std::string; using ParameterPrior = std::shared_ptr; using ParameterValue = double; +struct TunableParameters { + std::vector values; + std::vector lower_bounds; + std::vector upper_bounds; +}; + struct Parameter { ParameterValue value; ParameterPrior prior; @@ -228,40 +234,28 @@ class ParameterHandlingMixin { * some function doesn't actually care what any of the parameters * correspond to. */ - std::vector get_params_as_vector() const { - std::vector x; + + TunableParameters get_tunable_parameters() const { + TunableParameters output; + const ParameterStore params = get_params(); for (const auto &pair : params) { if (!pair.second.is_fixed()) { - x.push_back(pair.second.value); - } - } - return x; - } + output.values.push_back(pair.second.value); - std::vector get_param_lower_bounds() const { - std::vector lb; - const auto params = get_params(); - for (const auto &pair : params) { - double bound = pair.second.has_prior() ? pair.second.prior->lower_bound() - : -LARGE_VAL; - lb.push_back(fmax(bound, -LARGE_VAL)); - } - return lb; - } + double lb = pair.second.has_prior() ? pair.second.prior->lower_bound() + : -LARGE_VAL; + output.lower_bounds.push_back(lb); - std::vector get_param_upper_bounds() const { - std::vector ub; - const auto params = get_params(); - for (const auto &pair : params) { - double bound = pair.second.has_prior() ? pair.second.prior->upper_bound() - : LARGE_VAL; - ub.push_back(fmin(bound, LARGE_VAL)); + double ub = pair.second.has_prior() ? 
pair.second.prior->upper_bound() + : LARGE_VAL; + output.upper_bounds.push_back(ub); + } } - return ub; + return output; } - void set_params_from_vector(const std::vector &x) { + void set_tunable_params_values(const std::vector &x) { const ParameterStore params = get_params(); std::size_t i = 0; for (const auto &pair : params) { diff --git a/albatross/covariance_functions/covariance_functions.h b/albatross/covariance_functions/covariance_functions.h index 2cf50d44..2b4bd504 100644 --- a/albatross/covariance_functions/covariance_functions.h +++ b/albatross/covariance_functions/covariance_functions.h @@ -89,11 +89,11 @@ template struct CovarianceFunction { return term.set_prior(key, prior); }; inline auto pretty_string() const { return term.pretty_string(); }; - inline auto get_params_as_vector() const { - return term.get_params_as_vector(); + inline auto get_tunable_parameters() const { + return term.get_tunable_parameters(); }; - inline auto set_params_from_vector(const std::vector &x) { - return term.set_params_from_vector(x); + inline auto set_tunable_params_values(const std::vector &x) { + return term.set_tunable_params_values(x); }; }; diff --git a/albatross/tune.h b/albatross/tune.h index a8020dff..030b221d 100644 --- a/albatross/tune.h +++ b/albatross/tune.h @@ -70,16 +70,14 @@ template struct TuneModelConfig { // The various algorithms in nlopt are coded by the first two characters. // In this case LN stands for local, gradient free. 
auto m = model_creator(); - auto x = m->get_params_as_vector(); - optimizer = nlopt::opt(nlopt::LN_PRAXIS, (unsigned)x.size()); + + auto tunable_params = m->get_tunable_parameters(); + optimizer = + nlopt::opt(nlopt::LN_PRAXIS, (unsigned)tunable_params.values.size()); optimizer.set_ftol_abs(1e-8); optimizer.set_ftol_rel(1e-6); - - const auto lb = m->get_param_lower_bounds(); - optimizer.set_lower_bounds(lb); - - const auto ub = m->get_param_upper_bounds(); - optimizer.set_upper_bounds(ub); + optimizer.set_lower_bounds(tunable_params.lower_bounds); + optimizer.set_upper_bounds(tunable_params.upper_bounds); // the sensitivity to parameters varies greatly between parameters so // terminating based on change in x isn't a great criteria, we only // terminate based on xtol if the change is super small. @@ -108,7 +106,7 @@ double objective_function(const std::vector &x, const auto model = config.model_creator(); - model->set_params_from_vector(x); + model->set_tunable_params_values(x); if (!model->params_are_valid()) { config.output_stream << "Invalid Parameters:" << std::endl; @@ -138,7 +136,7 @@ ParameterStore tune_regression_model(const TuneModelConfig &config) { const auto example_model = config.model_creator(); - auto x = example_model->get_params_as_vector(); + auto x = example_model->get_tunable_parameters().values; assert(x.size()); nlopt::opt opt = config.optimizer; @@ -147,7 +145,7 @@ tune_regression_model(const TuneModelConfig &config) { nlopt::result result = opt.optimize(x, minf); // Tell the user what the final parameters were. 
- example_model->set_params_from_vector(x); + example_model->set_tunable_params_values(x); config.output_stream << "==================" << std::endl; config.output_stream << "TUNED MODEL PARAMS" << std::endl; config.output_stream << "nlopt termination code: " << to_string(result) diff --git a/tests/test_parameter_handling_mixin.cc b/tests/test_parameter_handling_mixin.cc index dbdc127c..9e8ba914 100644 --- a/tests/test_parameter_handling_mixin.cc +++ b/tests/test_parameter_handling_mixin.cc @@ -73,13 +73,13 @@ TEST(test_parameter_handler, test_get_set_from_vector) { // Make sure we start with the parameter vector we'd expect, even though // it was initialized out of order. - expect_parameter_vector_equal(original_param_vector, - original_handler.get_params_as_vector()); + expect_parameter_vector_equal( + original_param_vector, original_handler.get_tunable_parameters().values); // Now set the parameters using a new vector and make sure they stick - original_handler.set_params_from_vector(expected_param_vector); - expect_parameter_vector_equal(expected_param_vector, - original_handler.get_params_as_vector()); + original_handler.set_tunable_params_values(expected_param_vector); + expect_parameter_vector_equal( + expected_param_vector, original_handler.get_tunable_parameters().values); } /* @@ -104,13 +104,13 @@ TEST(test_parameter_handler, test_get_set_from_vector_with_fixed) { // Make sure we start with the parameter vector we'd expect, even though // it was initialized out of order. 
- expect_parameter_vector_equal(original_param_vector, - original_handler.get_params_as_vector()); + expect_parameter_vector_equal( + original_param_vector, original_handler.get_tunable_parameters().values); // Now set the parameters using a new vector and make sure they stick - original_handler.set_params_from_vector(expected_param_vector); - expect_parameter_vector_equal(expected_param_vector, - original_handler.get_params_as_vector()); + original_handler.set_tunable_params_values(expected_param_vector); + expect_parameter_vector_equal( + expected_param_vector, original_handler.get_tunable_parameters().values); } TEST(test_parameter_handler, test_prior_log_likelihood) { @@ -134,7 +134,7 @@ TEST(test_parameter_handler, test_set_prior) { auto p = TestParameterHandler(); const auto orig_params = p.get_params(); - const auto orig_param_vector = p.get_params_as_vector(); + const auto orig_param_vector = p.get_tunable_parameters().values; for (const auto &pair : orig_params) { ParameterPrior gaussian_prior = @@ -147,7 +147,8 @@ TEST(test_parameter_handler, test_set_prior) { // same. EXPECT_NE(orig_params, params_with_priors); // But the parameter values shouldn't have changed. - expect_parameter_vector_equal(orig_param_vector, p.get_params_as_vector()); + expect_parameter_vector_equal(orig_param_vector, + p.get_tunable_parameters().values); // We should also be able to change the parameter values without // modifying the prior. diff --git a/tests/test_tune.cc b/tests/test_tune.cc index cd5b1de5..b0df5103 100644 --- a/tests/test_tune.cc +++ b/tests/test_tune.cc @@ -72,6 +72,8 @@ TEST(test_tune, test_with_prior) { model->set_prior(pair.first, std::make_shared( pair.second.value + 0.1, 0.001)); } + auto param_names = map_keys(model->get_params()); + model->set_prior(param_names[0], std::make_shared()); return model; };