calculator.py
from __future__ import annotations

import copy
from typing import Dict, List, Optional, Sequence, Tuple

import amici
import numpy as np
from amici.parameter_mapping import ParameterMapping

from ..C import (
    AMICI_SIGMAY,
    AMICI_Y,
    GRAD,
    INNER_PARAMETERS,
    INNER_RDATAS,
    RDATAS,
    ModeType,
)
from ..objective.amici.amici_calculator import (
    AmiciCalculator,
    AmiciModel,
    AmiciSolver,
)
from .problem import AmiciInnerProblem
from .solver import AnalyticalInnerSolver, InnerSolver


class HierarchicalAmiciCalculator(AmiciCalculator):
"""
A calculator that is passed as `calculator` to the pypesto.AmiciObjective.
While this class cannot be used directly, it has two subclasses
which allow to use forward or adjoint sensitivity analysis to
solve a `pypesto.HierarchicalProblem` efficiently in an inner loop,
while the outer optimization is only concerned with variables not
specified as `pypesto.HierarchicalParameter`s.
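
    Example
    -------
    A sketch of the intended wiring (hypothetical: the concrete
    forward/adjoint subclass name and the construction of
    ``inner_problem`` and the AMICI objects are assumed, not shown in
    this module)::

        inner_solver = AnalyticalInnerSolver()
        # stand-in name for one of the two forward/adjoint subclasses
        calculator = SomeHierarchicalCalculatorSubclass(
            inner_problem=inner_problem,
            inner_solver=inner_solver,
        )
        # the calculator is then passed as ``calculator`` to the objective
        objective = pypesto.AmiciObjective(
            amici_model, amici_solver, edatas, calculator=calculator
        )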
"""
def __init__(
self,
inner_problem: AmiciInnerProblem,
inner_solver: Optional[InnerSolver] = None,
):
"""Initialize the calculator from the given problem.
Arguments
---------
inner_problem:
The inner problem of a hierarchical optimization problem.
inner_solver:
A solver to solve ``inner_problem``.
Defaults to ``pypesto.hierarchical.solver.AnalyticalInnerSolver``.
"""
super().__init__()
self.inner_problem = inner_problem
if inner_solver is None:
inner_solver = AnalyticalInnerSolver()
self.inner_solver = inner_solver
def initialize(self):
"""Initialize."""
super().initialize()
self.inner_solver.initialize()

    def __call__(
        self,
        x_dct: Dict,
        sensi_orders: Tuple[int, ...],
        mode: ModeType,
        amici_model: AmiciModel,
        amici_solver: AmiciSolver,
        edatas: List[amici.ExpData],
        n_threads: int,
        x_ids: Sequence[str],
        parameter_mapping: ParameterMapping,
        fim_for_hess: bool,
    ):
"""Perform the actual AMICI call, with hierarchical optimization.
See documentation for
`pypesto.objective.amici.amici_calculator.AmiciCalculator.__call__()`.
The return object also includes the simulation results that were
generated to solve the inner problem, as well as the parameters that
solver the inner problem.
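
        A sketch of reading these extra entries off the returned dict
        (``INNER_PARAMETERS`` and ``INNER_RDATAS`` are the key constants
        imported from ``..C``; the remaining call arguments are elided)::

            result = calculator(x_dct, sensi_orders, mode, ...)
            inner_parameters = result[INNER_PARAMETERS]  # id -> optimal value
            inner_rdatas = result[INNER_RDATAS]  # simulations of inner solve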
"""
        if not self.inner_problem.check_edatas(edatas=edatas):
            raise ValueError(
                'The experimental data provided to this call differs from '
                'the experimental data used to set up the hierarchical '
                'optimizer.'
            )

        # compute optimal inner parameters
        x_dct = copy.deepcopy(x_dct)
        x_dct.update(self.inner_problem.get_dummy_values(scaled=True))
        inner_result = super().__call__(
            x_dct=x_dct,
            sensi_orders=(0,),
            mode=mode,
            amici_model=amici_model,
            amici_solver=amici_solver,
            edatas=edatas,
            n_threads=n_threads,
            x_ids=x_ids,
            parameter_mapping=parameter_mapping,
            fim_for_hess=fim_for_hess,
        )
        inner_rdatas = inner_result[RDATAS]

        # If any AMICI simulation failed, it is unlikely that meaningful
        # inner parameters can be computed, so fail early.
        if any(rdata.status != amici.AMICI_SUCCESS for rdata in inner_rdatas):
            # if the gradient was requested, we need to provide some value
            # for it
            if 1 in sensi_orders:
                inner_result[GRAD] = np.full(
                    shape=len(x_ids), fill_value=np.nan
                )
            return inner_result

        inner_parameters = self.inner_solver.solve(
            problem=self.inner_problem,
            sim=[rdata[AMICI_Y] for rdata in inner_rdatas],
            sigma=[rdata[AMICI_SIGMAY] for rdata in inner_rdatas],
            scaled=True,
        )

        # fill in optimal values
        # directly writing to parameter mapping ensures that plists do not
        # include hierarchically computed parameters
        x_dct = copy.deepcopy(x_dct)
        x_dct.update(inner_parameters)

        # TODO use plist to compute only required derivatives, in
        #  `super.__call__`, `amici.parameter_mapping.fill_in_parameters`
        # TODO speed gain: if no gradient is required, then simulation can be
        #  skipped here, and rdatas can be updated in place
        #  (y, sigma, res, llh).
        result = super().__call__(
            x_dct=x_dct,
            sensi_orders=sensi_orders,
            mode=mode,
            amici_model=amici_model,
            amici_solver=amici_solver,
            edatas=edatas,
            n_threads=n_threads,
            x_ids=x_ids,
            parameter_mapping=parameter_mapping,
            fim_for_hess=fim_for_hess,
        )

        result[INNER_PARAMETERS] = inner_parameters
        result[INNER_RDATAS] = inner_rdatas
        return result