Commit 60ed844

Merge branch 'master' of https://github.com/tBuLi/symfit

Peter Kroon committed Nov 25, 2019
2 parents: 3c1167c + 57fa8ca

Showing 3 changed files with 147 additions and 57 deletions.
155 changes: 100 additions & 55 deletions symfit/core/models.py
@@ -1,4 +1,5 @@
from collections import namedtuple, Mapping, OrderedDict
from collections import Mapping, OrderedDict
import operator
import warnings
import sys

@@ -18,39 +19,72 @@
else:
import funcsigs as inspect_sig

def variabletuple(typename, variables, *args, **kwargs):

class ModelOutput(tuple):
"""
Create a :func:`~collections.namedtuple` using :class:`~symfit.core.argument.Variable`'s
whose names will be used as `field_names`.
Object to hold the output of a model call. It mimics a
:func:`collections.namedtuple`, but is initiated with
:class:`~symfit.core.argument.Variable` objects instead of strings.
The main reason for using this object is the `_asdict()` method: whereas a
``namedtuple`` initiates such an :class:`collections.OrderedDict` with the
``field_names`` as keys, this object returns a
:class:`collections.OrderedDict` which immediately has the ``Variable``
objects as keys.
Its information can be accessed using indexing or as attributes::
Example::
>>> x, y = variables('x, y')
>>> a, b = parameters('a, b')
>>> model = Model({y: a * x + b})
>>> ans = model(x=2, a=1, b=3)
>>> print(ans)
ModelOutput(variables=[y], output=[5])
>>> ans[0]
5
>>> ans.y
5
>>> x = Variable('x')
>>> Result = variabletuple('Result', [x])
>>> res = Result(5.0)
>>> res._asdict()
OrderedDict((x, 5.0))
:param typename: Name of the `variabletuple`.
:param variables: List of :class:`~symfit.core.argument.Variable`, to be used
as `field_names`
:param args: See :func:`~collections.namedtuple`
:param kwargs: See :func:`~collections.namedtuple`
:return: Type ``typename``
"""
def __new__(self, variables, output):
"""
``variables`` and ``output`` need to be in the same order!
:param variables: The variables corresponding to ``output``.
:param output: The output of a call which should be mapped to
``variables``.
"""
return tuple.__new__(ModelOutput, output)

def __init__(self, variables, output):
"""
``variables`` and ``output`` need to be in the same order!
:param variables: The variables corresponding to ``output``.
:param output: The output of a call which should be mapped to
``variables``.
"""
self.variables = variables
self.output = output
self.output_dict = OrderedDict(zip(variables, output))
self.variable_names = {var.name: var for var in variables}

def __getattr__(self, name):
try:
var = self.variable_names[name]
except KeyError as err:
raise AttributeError(err)
return self.output_dict[var]

def __getitem__(self, key):
return self.output[key]

def __repr__(self):
return self.__class__.__name__ + '(variables={}, output={})'.format(self.variables, self.output)

def _asdict(self):
return OrderedDict(zip(variables, self))
"""
:return: Returns a new OrderedDict representing this object.
"""
return self.output_dict.copy()

field_names = [var.name for var in variables]
named = namedtuple(typename, field_names, *args, **kwargs)
named._asdict = _asdict
return named
def __len__(self):
return len(self.output_dict)


class ModelError(Exception):
@@ -629,8 +663,7 @@ def __call__(self, *args, **kwargs):
:return: A namedtuple of all the dependent vars evaluated at the desired point. Will always return a tuple,
even for scalar valued functions. This is done for consistency.
"""
Ans = variabletuple('Ans', self)
return Ans(*self.eval_components(*args, **kwargs))
return ModelOutput(self.keys(), self.eval_components(*args, **kwargs))


class BaseGradientModel(BaseCallableModel):
@@ -710,8 +743,7 @@ def eval_jacobian(self, *args, **kwargs):
"""
:return: The jacobian matrix of the function.
"""
Ans = variabletuple('Ans', self)
return Ans(*self.finite_difference(*args, **kwargs))
return ModelOutput(self.keys(), self.finite_difference(*args, **kwargs))


class CallableNumericalModel(BaseCallableModel, BaseNumericalModel):
@@ -770,7 +802,6 @@ def numerical_components(self):
:return: lambda functions of each of the analytical components in
model_dict, to be used in numerical calculation.
"""
Ans = variabletuple('Ans', self.keys())
# All components must feature the independent vars and params, that's
# the API convention. But for those components which also contain
# interdependence, we add those vars
@@ -781,7 +812,7 @@ def numerical_components(self):
key = lambda arg: [isinstance(arg, Parameter), str(arg)]
ordered = sorted(dependencies, key=key)
components.append(sympy_to_py(expr, ordered))
return Ans(*components)
return ModelOutput(self.keys(), components)


class GradientModel(CallableModel, BaseGradientModel):
@@ -833,8 +864,7 @@ def eval_jacobian(self, *args, **kwargs):
for idx, comp in enumerate(jac):
jac[idx] = np.stack(np.broadcast_arrays(*comp))

Ans = variabletuple('Ans', self.keys())
return Ans(*jac)
return ModelOutput(self.keys(), jac)

class HessianModel(GradientModel):
"""
@@ -878,8 +908,7 @@ def eval_hessian(self, *args, **kwargs):
for idx, comp in enumerate(hess):
hess[idx] = np.stack(np.broadcast_arrays(*comp))

Ans = variabletuple('Ans', self.keys())
return Ans(*hess)
return ModelOutput(self.keys(), hess)


class Model(HessianModel):
@@ -933,7 +962,7 @@ def __init__(self, model_dict, initial, *lsoda_args, **lsoda_kwargs):
self.lsoda_args = lsoda_args
self.lsoda_kwargs = lsoda_kwargs

sort_func = lambda symbol: str(symbol)
sort_func = operator.attrgetter('name')
# Mapping from dependent vars to their derivatives
self.dependent_derivatives = {d: list(d.free_symbols - set(d.variables))[0] for d in model_dict}
self.dependent_vars = sorted(
@@ -942,7 +971,7 @@ def __init__(self, model_dict, initial, *lsoda_args, **lsoda_kwargs):
)
self.independent_vars = sorted(set(d.variables[0] for d in model_dict), key=sort_func)
self.interdependent_vars = [] # TODO: add this support for ODEModels
if not len(self.independent_vars):
if not len(self.independent_vars) == 1:
raise ModelError('ODEModel can only have one independent variable.')

self.model_dict = OrderedDict(
@@ -951,23 +980,29 @@ def __init__(self, model_dict, initial, *lsoda_args, **lsoda_kwargs):
key=lambda i: sort_func(self.dependent_derivatives[i[0]])
)
)

# We split the parameters into the parameters needed to evaluate the
# expression/components (self.model_params), and those that are used for
# initial values (self.initial_params). self.params will contain a union
# of the two, as expected.

# Extract all the params and vars as a sorted, unique list.
expressions = model_dict.values()
model_params = set([])
self.model_params = set([])

# Only the once's that have a Parameter as initial parameter.
# self.initial_params = {value for var, value in self.initial.items()
# if isinstance(value, Parameter)}
# Only the ones that have a Parameter as initial parameter.
self.initial_params = {value for var, value in self.initial.items()
if isinstance(value, Parameter)}

for expression in expressions:
vars, params = seperate_symbols(expression)
model_params.update(params)
self.model_params.update(params)
# self.independent_vars.update(vars)

# Although unique now, params and vars should be sorted alphabetically to prevent ambiguity
self.params = sorted(model_params, key=sort_func)
# self.params = sorted(self.model_params | self.initial_params, key=sort_func)
# self.model_params = sorted(self.model_params, key=sort_func)
# self.initial_params = sorted(self.initial_params, key=sort_func)
self.params = sorted(self.model_params | self.initial_params, key=sort_func)
self.model_params = sorted(self.model_params, key=sort_func)
self.initial_params = sorted(self.initial_params, key=sort_func)

# Make Variable object corresponding to each sigma var.
self.sigmas = {var: Variable(name='sigma_{}'.format(var.name)) for var in self.dependent_vars}
@@ -1030,7 +1065,7 @@ def _ncomponents(self):
but to `D(y, t) = ...`. The system spanned by these components
therefore still needs to be integrated.
"""
return [sympy_to_py(expr, self.independent_vars + self.dependent_vars + self.params)
return [sympy_to_py(expr, self.independent_vars + self.dependent_vars + self.model_params)
for expr in self.values()]

@cached_property
@@ -1047,7 +1082,7 @@ def _njacobian(self):
"""
return [
[sympy_to_py(
sympy.diff(expr, var), self.independent_vars + self.dependent_vars + self.params
sympy.diff(expr, var), self.independent_vars + self.dependent_vars + self.model_params
) for var in self.dependent_vars
] for _, expr in self.items()
]
@@ -1070,6 +1105,13 @@ def eval_components(self, *args, **kwargs):
Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]

initial_dependent = [self.initial[var] for var in self.dependent_vars]
# For the initial values, substitute any parameter for the value passed
# to this call. Scipy doesn't really understand Parameter/Symbols
for idx, init_var in enumerate(initial_dependent):
if init_var in self.initial_params:
initial_dependent[idx] = bound_arguments.arguments[init_var.name]

assert len(self.independent_vars) == 1
t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one

# Check if the time-like data includes the initial value, because integration should start there.
@@ -1095,12 +1137,16 @@ def eval_components(self, *args, **kwargs):
# Properly ordered time axis containing t_initial
t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))

# Call the numerical integrator. Note that we only pass the
# model_params, which will be used by sympy_to_py to create something we
# can evaluate numerically.
ans_bigger = odeint(
f,
initial_dependent,
t_bigger,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
bound_arguments.arguments[param.name] for param in self.model_params
),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
@@ -1109,7 +1155,8 @@ def eval_components(self, *args, **kwargs):
initial_dependent,
t_smaller,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
bound_arguments.arguments[param.name] for param in self.model_params
),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
@@ -1139,9 +1186,7 @@ def __call__(self, *args, **kwargs):
:return: A namedtuple of all the dependent vars evaluated at the desired point. Will always return a tuple,
even for scalar valued functions. This is done for consistency.
"""
Ans = variabletuple('Ans', self)
ans = Ans(*self.eval_components(*args, **kwargs))
return ans
return ModelOutput(self.keys(), self.eval_components(*args, **kwargs))

def _partial_diff(var, *params):
"""
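The core change in models.py is the replacement of the namedtuple factory variabletuple with the ModelOutput class shown above. A minimal sketch of how the new object behaves, assuming the public symfit API (variables, parameters, Model) used in its docstring; the printed values are illustrative only:

    from symfit import variables, parameters, Model

    x, y = variables('x, y')
    a, b = parameters('a, b')
    model = Model({y: a * x + b})

    ans = model(x=2, a=1, b=3)   # now a ModelOutput instead of a namedtuple
    print(ans)                   # ModelOutput(variables=[y], output=[5])
    print(ans[0], ans.y)         # indexing and attribute access both yield 5
    print(ans._asdict())         # OrderedDict keyed by the Variable y, not the string 'y'

Unlike the old variabletuple, no new type is generated per call; the same ModelOutput class carries the variables alongside the output.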
15 changes: 14 additions & 1 deletion tests/test_model.py
@@ -15,7 +15,7 @@
Function, diff
)
from symfit.core.models import (
jacobian_from_model, hessian_from_model, ModelError
jacobian_from_model, hessian_from_model, ModelError, ModelOutput
)

class TestModel(unittest.TestCase):
@@ -447,6 +447,19 @@ def test_interdependency(self):
self.assertEqual(model.__signature__, model.jacobian_model.__signature__)
self.assertEqual(model.__signature__, model.hessian_model.__signature__)

def test_ModelOutput(self):
"""
Test the ModelOutput object. To prevent #267 from recurring,
we attempt to make a model with more than 255 variables.
"""
params = parameters(','.join('a{}'.format(i) for i in range(300)))
data = np.ones(300)
output = ModelOutput(params, data)
self.assertEqual(len(output), 300)
self.assertIsInstance(output._asdict(), OrderedDict)
self.assertIsNot(output._asdict(), output.output_dict)


if __name__ == '__main__':
unittest.main()
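
The new test guards against #267 recurring: the old variabletuple built a namedtuple with one field per variable, which presumably hit Python's historical 255-argument limit in the generated constructor, whereas ModelOutput only ever receives two sequences. A small sketch of the copy semantics the test also asserts, mirroring the test above rather than documenting extra library behaviour:

    import numpy as np
    from symfit import parameters
    from symfit.core.models import ModelOutput

    params = parameters(','.join('a{}'.format(i) for i in range(300)))
    output = ModelOutput(params, np.ones(300))

    d = output._asdict()                     # a copy of output.output_dict
    d.popitem()                              # mutating the copy ...
    assert len(output.output_dict) == 300    # ... leaves the internal mapping intact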

34 changes: 33 additions & 1 deletion tests/test_ode.py
@@ -5,7 +5,7 @@
import pytest
import numpy as np

from symfit import parameters, variables, ODEModel, exp, Fit, D, Model, GradientModel
from symfit import parameters, variables, ODEModel, exp, Fit, D, Model, GradientModel, Parameter
from symfit.core.minimizers import MINPACK
from symfit.distributions import Gaussian

@@ -97,6 +97,38 @@ def test_polgar(self):
# plt.legend()
# plt.show()

def test_initial_parameters(self):
"""
Identical to test_polgar, but with a0 as free Parameter.
"""
a, b, c, d, t = variables('a, b, c, d, t')
k, p, l, m = parameters('k, p, l, m')

a0 = Parameter('a0', min=0, value=10, fixed=True)
c0 = Parameter('c0', min=0, value=0.1)
b = a0 - d + a
model_dict = {
D(d, t): l * c * b - m * d,
D(c, t): k * a * b - p * c - l * c * b + m * d,
D(a, t): - k * a * b + p * c,
}

ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: c0, d: 0.0})

# Generate some data
tdata = np.linspace(0, 3, 1000)
# Eval
AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3, a0=10, c0=0)
fit = Fit(ode_model, t=tdata, a=AA, c=AAB, d=BAAB)
results = fit.execute()
print(results)
self.assertEqual(results.value(a0), 10)
self.assertAlmostEqual(results.value(c0), 0)

self.assertEqual([a0, c0, k, l, m, p], ode_model.params)
self.assertEqual([a0, c0], ode_model.initial_params)
self.assertEqual([a0, k, l, m, p], ode_model.model_params)

def test_simple_kinetics(self):
"""
Simple kinetics data to test fitting
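test_initial_parameters exercises the other half of the change: the initial values of an ODEModel may now be Parameter objects, which land in initial_params and are substituted from the call arguments before integration. A minimal sketch under that assumption, using a single decay equation instead of the full Polgar system; the parameter names and synthetic data are made up for illustration:

    import numpy as np
    from symfit import variables, Parameter, ODEModel, Fit, D

    t, y = variables('t, y')
    k = Parameter('k', value=1.0, min=0)
    y0 = Parameter('y0', value=2.0, min=0)   # initial value as a free Parameter

    ode_model = ODEModel({D(y, t): -k * y}, initial={t: 0.0, y: y0})

    tdata = np.linspace(0, 5, 100)
    ydata = 3.0 * np.exp(-0.5 * tdata)       # synthetic decay with y0 = 3, k = 0.5

    fit = Fit(ode_model, t=tdata, y=ydata)
    result = fit.execute()
    print(result.value(k), result.value(y0)) # should recover roughly 0.5 and 3.0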
