Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 20 additions & 26 deletions albatross/core/parameter_handling_mixin.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,12 @@ using ParameterKey = std::string;
using ParameterPrior = std::shared_ptr<Prior>;
using ParameterValue = double;

struct TunableParameters {
std::vector<double> values;
std::vector<double> lower_bounds;
std::vector<double> upper_bounds;
};

struct Parameter {
ParameterValue value;
ParameterPrior prior;
Expand Down Expand Up @@ -228,40 +234,28 @@ class ParameterHandlingMixin {
* some function doesn't actually care what any of the parameters
* correspond to.
*/
std::vector<ParameterValue> get_params_as_vector() const {
std::vector<ParameterValue> x;

TunableParameters get_tunable_parameters() const {
TunableParameters output;

const ParameterStore params = get_params();
for (const auto &pair : params) {
if (!pair.second.is_fixed()) {
x.push_back(pair.second.value);
}
}
return x;
}
output.values.push_back(pair.second.value);

std::vector<double> get_param_lower_bounds() const {
std::vector<double> lb;
const auto params = get_params();
for (const auto &pair : params) {
double bound = pair.second.has_prior() ? pair.second.prior->lower_bound()
: -LARGE_VAL;
lb.push_back(fmax(bound, -LARGE_VAL));
}
return lb;
}
double lb = pair.second.has_prior() ? pair.second.prior->lower_bound()
: -LARGE_VAL;
output.lower_bounds.push_back(lb);

std::vector<double> get_param_upper_bounds() const {
std::vector<double> ub;
const auto params = get_params();
for (const auto &pair : params) {
double bound = pair.second.has_prior() ? pair.second.prior->upper_bound()
: LARGE_VAL;
ub.push_back(fmin(bound, LARGE_VAL));
double ub = pair.second.has_prior() ? pair.second.prior->upper_bound()
: LARGE_VAL;
output.upper_bounds.push_back(ub);
}
}
return ub;
return output;
}

void set_params_from_vector(const std::vector<ParameterValue> &x) {
void set_tunable_params_values(const std::vector<ParameterValue> &x) {
const ParameterStore params = get_params();
std::size_t i = 0;
for (const auto &pair : params) {
Expand Down
8 changes: 4 additions & 4 deletions albatross/covariance_functions/covariance_functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,11 +89,11 @@ template <typename Term> struct CovarianceFunction {
return term.set_prior(key, prior);
};
inline auto pretty_string() const { return term.pretty_string(); };
inline auto get_params_as_vector() const {
return term.get_params_as_vector();
inline auto get_tunable_parameters() const {
return term.get_tunable_parameters();
};
inline auto set_params_from_vector(const std::vector<ParameterValue> &x) {
return term.set_params_from_vector(x);
inline auto set_tunable_params_values(const std::vector<ParameterValue> &x) {
return term.set_tunable_params_values(x);
};
};

Expand Down
20 changes: 9 additions & 11 deletions albatross/tune.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,16 +70,14 @@ template <class FeatureType> struct TuneModelConfig {
// The various algorithms in nlopt are coded by the first two characters.
// In this case LN stands for local, gradient free.
auto m = model_creator();
auto x = m->get_params_as_vector();
optimizer = nlopt::opt(nlopt::LN_PRAXIS, (unsigned)x.size());

auto tunable_params = m->get_tunable_parameters();
optimizer =
nlopt::opt(nlopt::LN_PRAXIS, (unsigned)tunable_params.values.size());
optimizer.set_ftol_abs(1e-8);
optimizer.set_ftol_rel(1e-6);

const auto lb = m->get_param_lower_bounds();
optimizer.set_lower_bounds(lb);

const auto ub = m->get_param_upper_bounds();
optimizer.set_upper_bounds(ub);
optimizer.set_lower_bounds(tunable_params.lower_bounds);
optimizer.set_upper_bounds(tunable_params.upper_bounds);
// the sensitivity to parameters varies greatly between parameters so
// terminating based on change in x isn't a great criteria, we only
// terminate based on xtol if the change is super small.
Expand Down Expand Up @@ -108,7 +106,7 @@ double objective_function(const std::vector<double> &x,

const auto model = config.model_creator();

model->set_params_from_vector(x);
model->set_tunable_params_values(x);

if (!model->params_are_valid()) {
config.output_stream << "Invalid Parameters:" << std::endl;
Expand Down Expand Up @@ -138,7 +136,7 @@ ParameterStore
tune_regression_model(const TuneModelConfig<FeatureType> &config) {

const auto example_model = config.model_creator();
auto x = example_model->get_params_as_vector();
auto x = example_model->get_tunable_parameters().values;

assert(x.size());
nlopt::opt opt = config.optimizer;
Expand All @@ -147,7 +145,7 @@ tune_regression_model(const TuneModelConfig<FeatureType> &config) {
nlopt::result result = opt.optimize(x, minf);

// Tell the user what the final parameters were.
example_model->set_params_from_vector(x);
example_model->set_tunable_params_values(x);
config.output_stream << "==================" << std::endl;
config.output_stream << "TUNED MODEL PARAMS" << std::endl;
config.output_stream << "nlopt termination code: " << to_string(result)
Expand Down
25 changes: 13 additions & 12 deletions tests/test_parameter_handling_mixin.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,13 +73,13 @@ TEST(test_parameter_handler, test_get_set_from_vector) {

// Make sure we start with the parameter vector we'd expect, even though
// it was initialized out of order.
expect_parameter_vector_equal(original_param_vector,
original_handler.get_params_as_vector());
expect_parameter_vector_equal(
original_param_vector, original_handler.get_tunable_parameters().values);

// Now set the parameters using a new vector and make sure they stick
original_handler.set_params_from_vector(expected_param_vector);
expect_parameter_vector_equal(expected_param_vector,
original_handler.get_params_as_vector());
original_handler.set_tunable_params_values(expected_param_vector);
expect_parameter_vector_equal(
expected_param_vector, original_handler.get_tunable_parameters().values);
}

/*
Expand All @@ -104,13 +104,13 @@ TEST(test_parameter_handler, test_get_set_from_vector_with_fixed) {

// Make sure we start with the parameter vector we'd expect, even though
// it was initialized out of order.
expect_parameter_vector_equal(original_param_vector,
original_handler.get_params_as_vector());
expect_parameter_vector_equal(
original_param_vector, original_handler.get_tunable_parameters().values);

// Now set the parameters using a new vector and make sure they stick
original_handler.set_params_from_vector(expected_param_vector);
expect_parameter_vector_equal(expected_param_vector,
original_handler.get_params_as_vector());
original_handler.set_tunable_params_values(expected_param_vector);
expect_parameter_vector_equal(
expected_param_vector, original_handler.get_tunable_parameters().values);
}

TEST(test_parameter_handler, test_prior_log_likelihood) {
Expand All @@ -134,7 +134,7 @@ TEST(test_parameter_handler, test_set_prior) {
auto p = TestParameterHandler();

const auto orig_params = p.get_params();
const auto orig_param_vector = p.get_params_as_vector();
const auto orig_param_vector = p.get_tunable_parameters().values;

for (const auto &pair : orig_params) {
ParameterPrior gaussian_prior =
Expand All @@ -147,7 +147,8 @@ TEST(test_parameter_handler, test_set_prior) {
// same.
EXPECT_NE(orig_params, params_with_priors);
// But the parameter values shouldn't have changed.
expect_parameter_vector_equal(orig_param_vector, p.get_params_as_vector());
expect_parameter_vector_equal(orig_param_vector,
p.get_tunable_parameters().values);

// We should also be able to change the parameter values without
// modifying the prior.
Expand Down
2 changes: 2 additions & 0 deletions tests/test_tune.cc
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ TEST(test_tune, test_with_prior) {
model->set_prior(pair.first, std::make_shared<GaussianPrior>(
pair.second.value + 0.1, 0.001));
}
auto param_names = map_keys(model->get_params());
model->set_prior(param_names[0], std::make_shared<FixedPrior>());
return model;
};

Expand Down