Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

implement support for parameterScale{Normal,Laplace} #520

Merged
merged 16 commits into from
Nov 30, 2020
88 changes: 60 additions & 28 deletions pypesto/objective/priors.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,21 +171,25 @@ def get_parameter_prior_dict(index: int,
index of the parameter in x_full

prior_type: str
Prior is defined in LINEAR=untransformed parameter space! prior_type
can from {uniform, normal, laplace, logUniform, logNormal, logLaplace}
Prior is defined in LINEAR=untransformed parameter space,
unless it starts with "parameterScale"! prior_type
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
can be from {"uniform", "normal", "laplace", "logUniform", "logNormal",
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
"logLaplace", "parameterScaleUniform", "parameterScaleNormal",
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
"parameterScaleLaplace"}

prior_parameters:
Parameters of the priors. Parameters are defined in linear scale.

parameter_scale:
scale, in which parameter is defined (since a parameter can be
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
log-transformed, while the prior is always defined in the linear space)
log-transformed, while the prior is always defined in the linear
space, unless it starts with "parameterScale")
"""

log_f, d_log_f_dx, dd_log_f_ddx = \
_prior_densities(prior_type, prior_parameters)

if parameter_scale == 'lin':
if parameter_scale == 'lin' or prior_type.startswith('parameterScale'):

return {'index': index,
'density_fun': log_f,
Expand Down Expand Up @@ -247,34 +251,62 @@ def _prior_densities(prior_type: str,
Callable]:
"""
Returns a tuple of Callables of the (log-)density (in untransformed =
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
linear scale), together with their first + second derivative
(= sensis) w.r.t. x.

linear scale), unless prior_type starts with "parameterScale",
together with their first + second derivative (= sensis) w.r.t.
the parameters.

Currently the following distributions are supported:

* uniform:
prior_parameters[0] and prior_parameters[1] give the lower and upper
boundary.
* normal:
normal distribution, with mean prior_parameters[0] and
standard deviation prior_parameters[1]
* laplace:
laplace distribution, with location prior_parameters[0] and
scale prior_parameters[1]
* logNormal:
logNormal distribution, where prior_parameters are mean and
standard deviation of the exp(X).

Currently not supported, but eventually planed are the
following distributions:

* logUniform
* logLaplace
Parameters
----------

prior_type:
string identifier indicating the distribution to be used

* uniform:
Uniform distribution on transformed parameter scale.
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved
* parameterScaleUniform:
Uniform distribution on original parameter scale.
* normal:
Normal distribution on transformed parameter scale.
* parameterScaleNormal:
Normal distribution on original parameter scale.
* laplace:
Laplace distribution on transformed parameter scale
* parameterScaleLaplace:
Laplace distribution on original parameter scale.
* logNormal:
LogNormal distribution on transformed parameter scale

Currently not supported, but eventually planned are the
following distributions:

* logUniform
* logLaplace

prior_parameters:
parameters for the distribution

* uniform/parameterScaleUniform:
- prior_parameters[0]: lower bound
- prior_parameters[1]: upper bound

* laplace/parameterScaleLaplace:
- prior_parameters[0]: location parameter
- prior_parameters[1]: scale parameter

* normal/parameterScaleNormal:
- prior_parameters[0]: mean
- prior_parameters[1]: standard deviation

* logNormal:
- prior_parameters[0]: mean of log-parameters
- prior_parameters[1]: standard deviation of log-parameters


"""

if prior_type == 'uniform':
if prior_type in ['uniform', 'parameterScaleUniform']:
FFroehlich marked this conversation as resolved.
Show resolved Hide resolved

def log_f(x):
if prior_parameters[0] <= x <= prior_parameters[1]:
Expand All @@ -287,7 +319,7 @@ def log_f(x):

return log_f, d_log_f_dx, dd_log_f_ddx

elif prior_type == 'normal':
elif prior_type in ['normal', 'parameterScaleNormal']:

mean = prior_parameters[0]
sigma2 = prior_parameters[1]**2
Expand All @@ -302,7 +334,7 @@ def log_f(x):

return log_f, d_log_f_dx, dd_log_f_ddx

elif prior_type == 'laplace':
elif prior_type in ['laplace', 'parameterScaleLaplace']:

mean = prior_parameters[0]
scale = prior_parameters[1]
Expand Down
117 changes: 67 additions & 50 deletions test/test_prior.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

import math
import itertools
import pytest
import numpy as np
import scipy.optimize as opt

Expand All @@ -13,85 +14,101 @@
from pypesto.objective import NegLogParameterPriors
from pypesto.objective.priors import get_parameter_prior_dict

scales = ['lin', 'log', 'log10']
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Similarly, these and other string constants in this file could be replaced with petab.C constants

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This would make the pypesto API dependent on petab, which I would rather avoid. It should be possible to use these functions without petab and we don't want that functionality to break just because petab changed some of its literals.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Then it might be worthwhile for pyPESTO to maintain its own set of constants (no need now).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

agreed!


def test_mode():

@pytest.fixture(params=scales)
def scale(request):
return request.param


prior_types = ['uniform', 'normal', 'laplace', 'logNormal',
'parameterScaleUniform', 'parameterScaleNormal',
'parameterScaleLaplace']


@pytest.fixture(params=prior_types)
def prior_type(request):
return request.param


def test_mode(scale, prior_type):
"""
Tests the maximum/optimum for priors in different scales...
"""

scales = ['lin', 'log', 'log10']
prior_types = ['normal', 'laplace',
'logNormal']

problem_dict = {'lin': {'lb': [0], 'ub': [10], 'opt': [1]},
'log': {'lb': [-3], 'ub': [3], 'opt': [0]},
'log10': {'lb': [-3], 'ub': [2], 'opt': [0]}}

for prior_type, scale in itertools.product(prior_types, scales):
prior_list = [get_parameter_prior_dict(
0, prior_type, [1, 1], scale)]

prior_list = [get_parameter_prior_dict(
0, prior_type, [1, 1], scale)]
test_prior = NegLogParameterPriors(prior_list)
test_problem = pypesto.Problem(test_prior,
lb=problem_dict[scale]['lb'],
ub=problem_dict[scale]['ub'],
dim_full=1,
x_scales=[scale])

test_prior = NegLogParameterPriors(prior_list)
test_problem = pypesto.Problem(test_prior,
lb=problem_dict[scale]['lb'],
ub=problem_dict[scale]['ub'],
dim_full=1,
x_scales=[scale])
if prior_type.startswith('parameterScale'):
scale = 'lin'

optimizer = pypesto.optimize.ScipyOptimizer(method='Nelder-Mead')
optimizer = pypesto.optimize.ScipyOptimizer(method='Nelder-Mead')

result = pypesto.optimize.minimize(
problem=test_problem, optimizer=optimizer, n_starts=10)

assert np.isclose(result.optimize_result.list[0]['x'],
problem_dict[scale]['opt'], atol=1e-04)
result = pypesto.optimize.minimize(
problem=test_problem, optimizer=optimizer, n_starts=10)

# test uniform distribution:
for scale in scales:
prior_dict = get_parameter_prior_dict(
0, 'uniform', [1, 2], scale)
if prior_type in ['uniform', 'parameterScaleUniform']:

if prior_type == 'parameterScaleUniform':
scale = 'lin'

# check inside and outside of interval
assert abs(prior_dict['density_fun'](lin_to_scaled(.5, scale))
- 0) < 1e-8
assert abs(prior_list[0]['density_fun'](
lin_to_scaled(.5, scale)) - 0
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
lin_to_scaled(.5, scale)) - 0
lin_to_scaled(.5, scale))

Are these - 0 or - math.log(1) from the old code intended?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

best to check via "diff < ...".

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure what you are suggesting here.

) < 1e-8

assert abs(prior_dict['density_fun'](lin_to_scaled(1.5, scale))
- math.log(1)) < 1e-8
assert abs(prior_list[0]['density_fun'](
lin_to_scaled(1.5, scale)) - math.log(1)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
lin_to_scaled(1.5, scale)) - math.log(1)
lin_to_scaled(1.5, scale))

) < 1e-8

assert abs(prior_dict['density_fun'](lin_to_scaled(2.5, scale))
- 0) < 1e-8
assert abs(prior_list[0]['density_fun'](
lin_to_scaled(2.5, scale)) - 0
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
lin_to_scaled(2.5, scale)) - 0
lin_to_scaled(2.5, scale))

) < 1e-8

else:
# flat functions don't have local minima, so don't check this for
# uniform priors
assert np.isclose(result.optimize_result.list[0]['x'],
problem_dict[scale]['opt'], atol=1e-04)

def test_derivatives():

def test_derivatives(prior_type, scale):
"""
Tests the finite gradients and second order derivatives.
"""

scales = ['lin', 'log', 'log10']
prior_types = ['uniform', 'normal', 'laplace', 'logNormal']

for prior_type, scale in itertools.product(prior_types, scales):

if prior_type == 'uniform':
prior_parameters = [-1, 1]
else:
prior_parameters = [1, 1]
if prior_type in ['uniform', 'parameterScaleUniform']:
prior_parameters = [-1, 1]
else:
prior_parameters = [1, 1]

prior_dict = get_parameter_prior_dict(
0, prior_type, prior_parameters, scale)
prior_dict = get_parameter_prior_dict(
0, prior_type, prior_parameters, scale)

# use this x0, since it is a moderate value both in linear
# and in log scale...
x0 = np.array([0.5])
# use this x0, since it is a moderate value both in linear
# and in log scale...
x0 = np.array([0.5])

err_grad = opt.check_grad(prior_dict['density_fun'],
prior_dict['density_dx'], x0)
err_hes = opt.check_grad(prior_dict['density_dx'],
prior_dict['density_ddx'], x0)
err_grad = opt.check_grad(prior_dict['density_fun'],
prior_dict['density_dx'], x0)
err_hes = opt.check_grad(prior_dict['density_dx'],
prior_dict['density_ddx'], x0)

assert err_grad < 1e-3
assert err_hes < 1e-3
assert err_grad < 1e-3
assert err_hes < 1e-3


def lin_to_scaled(x: float,
Expand Down