diff --git a/.travis.yml b/.travis.yml index 9591e50c..b3d92715 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,7 @@ jobs: - python: "3.5" - python: "3.6" - python: "3.7" - - python: "2.7" + - python: "3.8" - stage: docs python: "3.7" diff --git a/requirements.txt b/requirements.txt index 3059f255..f65a13f1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy >= 1.12 -scipy >= 1.0, <1.2 +scipy >= 1.0 sympy >= 1.2 toposort funcsigs; python_version < '3.0' functools32; python_version < '3.0' -six \ No newline at end of file +six diff --git a/setup.cfg b/setup.cfg index 9425dcfe..7c44761e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,12 +11,12 @@ classifier = Intended Audience :: Science/Research Topic :: Scientific/Engineering License :: OSI Approved :: MIT License - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + keywords = fit fitting symbolic [bdist_wheel] diff --git a/symfit/core/models.py b/symfit/core/models.py index f839c7ae..78e7c944 100644 --- a/symfit/core/models.py +++ b/symfit/core/models.py @@ -1,4 +1,5 @@ -from collections import namedtuple, Mapping, OrderedDict +from collections import Mapping, OrderedDict +import operator import warnings import sys @@ -18,39 +19,72 @@ else: import funcsigs as inspect_sig -def variabletuple(typename, variables, *args, **kwargs): + +class ModelOutput(tuple): """ - Create a :func:`~collections.namedtuple` using :class:`~symfit.core.argument.Variable`'s - whoses names will be used as `field_names`. + Object to hold the output of a model call. It mimics a + :func:`collections.namedtuple`, but is initiated with + :class:`~symfit.core.argument.Variable` objects instead of strings. 
- The main reason for using this object is the `_asdict()` method: whereas a - ``namedtuple`` initiates such an :class:`collections.OrderedDict` with the - ``field_names`` as keys, this object returns a - :class:`collections.OrderedDict` which immediately has the ``Variable`` - objects as keys. + Its information can be accessed using indexing or as attributes:: - Example:: + >>> x, y = variables('x, y') + >>> a, b = parameters('a, b') + >>> model = Model({y: a * x + b}) + + >>> ans = model(x=2, a=1, b=3) + >>> print(ans) + ModelOutput(variables=[y], output=[5]) + >>> ans[0] + 5 + >>> ans.y + 5 - >>> x = Variable('x') - >>> Result = variabletuple('Result', [x]) - >>> res = Result(5.0) - >>> res._asdict() - OrderedDict((x, 5.0)) - - :param typename: Name of the `variabletuple`. - :param variables: List of :class:`~symfit.core.argument.Variable`, to be used - as `field_names` - :param args: See :func:`~collections.namedtuple` - :param kwargs: See :func:`~collections.namedtuple` - :return: Type ``typename`` """ + def __new__(self, variables, output): + """ + ``variables`` and ``output`` need to be in the same order! + + :param variables: The variables corresponding to ``output``. + :param output: The output of a call which should be mapped to + ``variables``. + """ + return tuple.__new__(ModelOutput, output) + + def __init__(self, variables, output): + """ + ``variables`` and ``output`` need to be in the same order! + + :param variables: The variables corresponding to ``output``. + :param output: The output of a call which should be mapped to + ``variables``. 
+ """ + self.variables = variables + self.output = output + self.output_dict = OrderedDict(zip(variables, output)) + self.variable_names = {var.name: var for var in variables} + + def __getattr__(self, name): + try: + var = self.variable_names[name] + except KeyError as err: + raise AttributeError(err) + return self.output_dict[var] + + def __getitem__(self, key): + return self.output[key] + + def __repr__(self): + return self.__class__.__name__ + '(variables={}, output={})'.format(self.variables, self.output) + def _asdict(self): - return OrderedDict(zip(variables, self)) + """ + :return: Returns a new OrderedDict representing this object. + """ + return self.output_dict.copy() - field_names = [var.name for var in variables] - named = namedtuple(typename, field_names, *args, **kwargs) - named._asdict = _asdict - return named + def __len__(self): + return len(self.output_dict) class ModelError(Exception): @@ -629,8 +663,7 @@ def __call__(self, *args, **kwargs): :return: A namedtuple of all the dependent vars evaluated at the desired point. Will always return a tuple, even for scalar valued functions. This is done for consistency. """ - Ans = variabletuple('Ans', self) - return Ans(*self.eval_components(*args, **kwargs)) + return ModelOutput(self.keys(), self.eval_components(*args, **kwargs)) class BaseGradientModel(BaseCallableModel): @@ -710,8 +743,7 @@ def eval_jacobian(self, *args, **kwargs): """ :return: The jacobian matrix of the function. """ - Ans = variabletuple('Ans', self) - return Ans(*self.finite_difference(*args, **kwargs)) + return ModelOutput(self.keys(), self.finite_difference(*args, **kwargs)) class CallableNumericalModel(BaseCallableModel, BaseNumericalModel): @@ -770,7 +802,6 @@ def numerical_components(self): :return: lambda functions of each of the analytical components in model_dict, to be used in numerical calculation. 
""" - Ans = variabletuple('Ans', self.keys()) # All components must feature the independent vars and params, that's # the API convention. But for those components which also contain # interdependence, we add those vars @@ -781,7 +812,7 @@ def numerical_components(self): key = lambda arg: [isinstance(arg, Parameter), str(arg)] ordered = sorted(dependencies, key=key) components.append(sympy_to_py(expr, ordered)) - return Ans(*components) + return ModelOutput(self.keys(), components) class GradientModel(CallableModel, BaseGradientModel): @@ -833,8 +864,7 @@ def eval_jacobian(self, *args, **kwargs): for idx, comp in enumerate(jac): jac[idx] = np.stack(np.broadcast_arrays(*comp)) - Ans = variabletuple('Ans', self.keys()) - return Ans(*jac) + return ModelOutput(self.keys(), jac) class HessianModel(GradientModel): """ @@ -878,8 +908,7 @@ def eval_hessian(self, *args, **kwargs): for idx, comp in enumerate(hess): hess[idx] = np.stack(np.broadcast_arrays(*comp)) - Ans = variabletuple('Ans', self.keys()) - return Ans(*hess) + return ModelOutput(self.keys(), hess) class Model(HessianModel): @@ -933,7 +962,7 @@ def __init__(self, model_dict, initial, *lsoda_args, **lsoda_kwargs): self.lsoda_args = lsoda_args self.lsoda_kwargs = lsoda_kwargs - sort_func = lambda symbol: str(symbol) + sort_func = operator.attrgetter('name') # Mapping from dependent vars to their derivatives self.dependent_derivatives = {d: list(d.free_symbols - set(d.variables))[0] for d in model_dict} self.dependent_vars = sorted( @@ -942,7 +971,7 @@ def __init__(self, model_dict, initial, *lsoda_args, **lsoda_kwargs): ) self.independent_vars = sorted(set(d.variables[0] for d in model_dict), key=sort_func) self.interdependent_vars = [] # TODO: add this support for ODEModels - if not len(self.independent_vars): + if not len(self.independent_vars) == 1: raise ModelError('ODEModel can only have one independent variable.') self.model_dict = OrderedDict( @@ -951,23 +980,29 @@ def __init__(self, model_dict, initial, 
*lsoda_args, **lsoda_kwargs): key=lambda i: sort_func(self.dependent_derivatives[i[0]]) ) ) + + # We split the parameters into the parameters needed to evaluate the + # expression/components (self.model_params), and those that are used for + # initial values (self.initial_params). self.params will contain a union + # of the two, as expected. + # Extract all the params and vars as a sorted, unique list. expressions = model_dict.values() - model_params = set([]) + self.model_params = set([]) - # Only the once's that have a Parameter as initial parameter. - # self.initial_params = {value for var, value in self.initial.items() - # if isinstance(value, Parameter)} + # Only the ones that have a Parameter as initial parameter. + self.initial_params = {value for var, value in self.initial.items() + if isinstance(value, Parameter)} for expression in expressions: vars, params = seperate_symbols(expression) - model_params.update(params) + self.model_params.update(params) # self.independent_vars.update(vars) + # Although unique now, params and vars should be sorted alphabetically to prevent ambiguity - self.params = sorted(model_params, key=sort_func) - # self.params = sorted(self.model_params | self.initial_params, key=sort_func) - # self.model_params = sorted(self.model_params, key=sort_func) - # self.initial_params = sorted(self.initial_params, key=sort_func) + self.params = sorted(self.model_params | self.initial_params, key=sort_func) + self.model_params = sorted(self.model_params, key=sort_func) + self.initial_params = sorted(self.initial_params, key=sort_func) # Make Variable object corresponding to each sigma var. self.sigmas = {var: Variable(name='sigma_{}'.format(var.name)) for var in self.dependent_vars} @@ -1030,7 +1065,7 @@ def _ncomponents(self): but to `D(y, t) = ...`. The system spanned by these component therefore still needs to be integrated. 
""" - return [sympy_to_py(expr, self.independent_vars + self.dependent_vars + self.params) + return [sympy_to_py(expr, self.independent_vars + self.dependent_vars + self.model_params) for expr in self.values()] @cached_property @@ -1047,7 +1082,7 @@ def _njacobian(self): """ return [ [sympy_to_py( - sympy.diff(expr, var), self.independent_vars + self.dependent_vars + self.params + sympy.diff(expr, var), self.independent_vars + self.dependent_vars + self.model_params ) for var in self.dependent_vars ] for _, expr in self.items() ] @@ -1070,6 +1105,13 @@ def eval_components(self, *args, **kwargs): Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian] initial_dependent = [self.initial[var] for var in self.dependent_vars] + # For the initial values, substitute any parameter for the value passed + # to this call. Scipy doesn't really understand Parameter/Symbols + for idx, init_var in enumerate(initial_dependent): + if init_var in self.initial_params: + initial_dependent[idx] = bound_arguments.arguments[init_var.name] + + assert len(self.independent_vars) == 1 t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one # Check if the time-like data includes the initial value, because integration should start there. @@ -1095,12 +1137,16 @@ def eval_components(self, *args, **kwargs): # Properly ordered time axis containing t_initial t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger)) + # Call the numerical integrator. Note that we only pass the + # model_params, which will be used by sympy_to_py to create something we + # can evaluate numerically. 
 ans_bigger = odeint( f, initial_dependent, t_bigger, args=tuple( - bound_arguments.arguments[param.name] for param in self.params), + bound_arguments.arguments[param.name] for param in self.model_params + ), Dfun=Dfun, *self.lsoda_args, **self.lsoda_kwargs ) @@ -1109,7 +1155,8 @@ def eval_components(self, *args, **kwargs): initial_dependent, t_smaller, args=tuple( - bound_arguments.arguments[param.name] for param in self.params), + bound_arguments.arguments[param.name] for param in self.model_params + ), Dfun=Dfun, *self.lsoda_args, **self.lsoda_kwargs ) @@ -1139,9 +1186,7 @@ def __call__(self, *args, **kwargs): :return: A namedtuple of all the dependent vars evaluated at the desired point. Will always return a tuple, even for scalar valued functions. This is done for consistency. """ - Ans = variabletuple('Ans', self) - ans = Ans(*self.eval_components(*args, **kwargs)) - return ans + return ModelOutput(self.keys(), self.eval_components(*args, **kwargs)) def _partial_diff(var, *params): """ diff --git a/tests/test_model.py b/tests/test_model.py index bf0c5ced..77090c5e 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -15,7 +15,7 @@ Function, diff ) from symfit.core.models import ( - jacobian_from_model, hessian_from_model, ModelError + jacobian_from_model, hessian_from_model, ModelError, ModelOutput ) @@ -454,3 +454,15 @@ def test_interdependency(): assert model.__signature__ == model.jacobian_model.__signature__ assert model.__signature__ == model.hessian_model.__signature__ + +def test_ModelOutput(): + """ + Test the ModelOutput object. To prevent #267 from recurring, + we attempt to make a model with more than 255 variables. 
+ """ + params = parameters(','.join('a{}'.format(i) for i in range(300))) + data = np.ones(300) + output = ModelOutput(params, data) + assert len(output) == 300 + assert isinstance(output._asdict(), OrderedDict) + assert output._asdict() is not output.output_dict diff --git a/tests/test_ode.py b/tests/test_ode.py index 6079d4ae..6311d63f 100644 --- a/tests/test_ode.py +++ b/tests/test_ode.py @@ -5,7 +5,7 @@ import pytest import numpy as np -from symfit import parameters, variables, ODEModel, exp, Fit, D, Model, GradientModel +from symfit import parameters, variables, ODEModel, exp, Fit, D, Model, GradientModel, Parameter from symfit.core.minimizers import MINPACK from symfit.distributions import Gaussian @@ -222,3 +222,35 @@ def test_odemodel_sanity(): } with pytest.raises(RuntimeWarning): fit = Fit(model_dict, t=tdata, a=adata) + +def test_initial_parameters(): + """ + Identical to test_polgar, but with a0 as free Parameter. + """ + a, b, c, d, t = variables('a, b, c, d, t') + k, p, l, m = parameters('k, p, l, m') + + a0 = Parameter('a0', min=0, value=10, fixed=True) + c0 = Parameter('c0', min=0, value=0.1) + b = a0 - d + a + model_dict = { + D(d, t): l * c * b - m * d, + D(c, t): k * a * b - p * c - l * c * b + m * d, + D(a, t): - k * a * b + p * c, + } + + ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: c0, d: 0.0}) + + # Generate some data + tdata = np.linspace(0, 3, 1000) + # Eval + AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3, a0=10, c0=0) + fit = Fit(ode_model, t=tdata, a=AA, c=AAB, d=BAAB) + results = fit.execute() + print(results) + assert results.value(a0) == 10 + assert results.value(c0) == pytest.approx(0) + + assert ode_model.params == [a0, c0, k, l, m, p] + assert ode_model.initial_params == [a0, c0] + assert ode_model.model_params == [a0, k, l, m, p]