
Commit

Merge pull request #66 from brian-team/remove_lmfit
Remove lmfit
mstimberg committed Jun 21, 2022
2 parents e988e6d + 52f0ab9 · commit c7b3b22
Showing 5 changed files with 114 additions and 160 deletions.
11 changes: 8 additions & 3 deletions .github/workflows/tests.yml
@@ -14,8 +14,10 @@ jobs:

runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: [3.7, 3.9]
latest-brian: [true, false]

steps:
- uses: actions/checkout@v2
@@ -25,9 +27,12 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest-coverage coveralls lmfit
pip install ".[full]"
python -m pip install --upgrade pip wheel
python -m pip install flake8 pytest-coverage coveralls
python -m pip install -r requirements.txt
- name: Update to latest Brian development version
run: python -m pip install -i https://test.pypi.org/simple/ --pre --upgrade Brian2
if: ${{ matrix.latest-brian }}
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
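
For context: the new latest-brian matrix axis makes each Python version run twice, once against the released Brian2 pulled in via requirements.txt and once against the development build from test.pypi.org. A minimal sketch (not part of this commit) of how a test run can report which build it ended up with; only the standard brian2.__version__ attribute is used:

    import brian2

    # A ".dev"/".post" suffix indicates the pre-release build installed
    # with "pip install --pre" from test.pypi.org.
    print(f"Testing against Brian2 {brian2.__version__}")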
180 changes: 84 additions & 96 deletions brian2modelfitting/fitter.py
@@ -8,6 +8,7 @@
from numpy import (ones, array, arange, concatenate, mean, argmin, nanargmin,
reshape, zeros, sqrt, ndarray, broadcast_to, sum, cumsum,
hstack, tile, repeat)
from scipy.optimize import least_squares

from brian2.parsing.sympytools import sympy_to_str, str_to_sympy
from brian2.units.fundamentalunits import DIMENSIONLESS, get_dimensions, fail_for_dimension_mismatch
@@ -1173,8 +1174,7 @@ def refine(self, params=None, metric=None,
iteration=1e9, level=0, **kwds):
"""
Refine the fitting results with a sequentially operating minimization
algorithm. Uses the `lmfit <https://lmfit.github.io/lmfit-py/>`_
package which itself makes use of
algorithm, using the `~scipy.optimize.least_squares` function from
`scipy.optimize <https://docs.scipy.org/doc/scipy/reference/optimize.html>`_.
Has to be called after `~.TraceFitter.fit`, but a call with
``n_rounds=0`` is enough.
@@ -1227,31 +1227,23 @@ def refine(self, params=None, metric=None,
Additional arguments can overwrite the bounds for individual
parameters (if not given, the bounds previously specified in the
call to `~.TraceFitter.fit` will be used). All other arguments will
be passed on to `.lmfit.minimize` and can be used to e.g. change the
method, or to specify method-specific arguments.
be passed on to `~scipy.optimize.least_squares`.
Returns
-------
parameters : dict
The parameters at the end of the optimization process as a
dictionary.
result : `.lmfit.MinimizerResult`
result : `.scipy.optimize.OptimizeResult`
The result of the optimization process.
Notes
-----
The default method used by `lmfit` is least-squares minimization using
a Levenberg-Marquardt method. Note that there is no support for
specifying a `Metric`, the given output trace(s) will be subtracted
from the simulated trace(s) and passed on to the minimization algorithm.
This method always uses the runtime mode, independent of the selection
of the current device.
There is no support for specifying a `Metric`; the given output trace(s)
will be subtracted from the simulated trace(s) and passed on to the
minimization algorithm which will internally calculate the sum of
squares.
"""
try:
import lmfit
except ImportError:
raise ImportError('Refinement needs the "lmfit" package.')
if params is None:
if self.best_params is None:
raise TypeError('You need to either specify parameters or run '
@@ -1285,8 +1277,10 @@ def refine(self, params=None, metric=None,

callback_func = callback_setup(callback, None)

# Set up Parameter objects
parameters = lmfit.Parameters()
# Set up initial values and bounds
min_bounds = []
max_bounds = []
x0 = []
for param_name in self.parameter_names:
if param_name not in kwds:
if self.bounds is None:
@@ -1295,8 +1289,9 @@
min_bound, max_bound = self.bounds[param_name]
else:
min_bound, max_bound = kwds.pop(param_name)
parameters.add(param_name, value=array(params[param_name]),
min=array(min_bound), max=array(max_bound))
x0.append(params[param_name])
min_bounds.append(min_bound)
max_bounds.append(max_bound)

self.simulator = self.setup_simulator('refine', self.n_traces,
output_var=self.output_var,
@@ -1308,10 +1303,18 @@
t_start_steps = [int(round(t_s / self.dt)) if t_w is None else 0
for t_s, t_w in zip(t_start, t_weights)]

def _calc_error(params):
param_dic = get_param_dic([{p: params[p].value
for p in self.parameter_names}],

# TODO: Move all this into a class
tested_parameters = []
errors = []
combined_errors = []
n_evaluations = [-1]

def _calc_error(x):
param_dic = get_param_dic([{p: x[idx]
for idx, p in enumerate(self.parameter_names)}],
self.parameter_names, self.n_traces, 1)

self.simulator.run(self.duration, param_dic,
self.parameter_names, iteration=iteration,
name='refine')
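
The three hunks above replace lmfit's Parameters container with the plain sequences that scipy.optimize.least_squares expects: an initial vector x0 plus a (min_bounds, max_bounds) pair, all in parameter order. A self-contained sketch of that calling convention, with a toy residual function standing in for the simulator-based _calc_error:

    import numpy as np
    from scipy.optimize import least_squares

    def residuals(x):
        # least_squares minimizes 0.5 * sum(residuals(x)**2) internally,
        # which is why refine() no longer squares the residuals itself.
        return np.array([x[0] - 1.0, 10.0 * (x[1] - x[0] ** 2)])

    x0 = [0.5, 0.5]                    # one initial value per parameter
    bounds = ([0.0, 0.0], [2.0, 2.0])  # (min_bounds, max_bounds)

    result = least_squares(residuals, x0, bounds=bounds)
    print(result.x, result.cost)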
@@ -1328,122 +1331,107 @@ def _calc_error(params):
else:
residual = (trace - out) * sqrt(t_w)
one_residual.append((residual*norm).flatten())
return array(hstack(one_residual))

def _calc_gradient(params):
residuals = []
for name in self.parameter_names:
one_residual = []
for out_var, t_s_steps, t_w, norm in zip(self.output_var,
t_start_steps,
t_weights,
normalization):
trace = getattr(self.simulator.statemonitor,
f'S_{out_var}_{name}_')
if t_w is None:
residual = trace[:, t_s_steps:]
else:
residual = trace * sqrt(t_w)
one_residual.append((residual*norm).flatten())
residuals.append(array(hstack(one_residual)))
gradient = array(residuals)
return gradient.T

tested_parameters = []
errors = []
combined_errors = []
def _callback_wrapper(params, iter, resid, *args, **kwds):
output_len = [output[:, t_s_steps:].size
for output, t_s_steps in zip(self.output,
t_start_steps)]
end_idx = cumsum(output_len)
start_idx = hstack([0, end_idx[:-1]])
error = tuple([mean(resid[start:end]**2)
for start, end in zip(start_idx, end_idx)])
error = tuple([mean(r**2) for r in one_residual])
combined_error = sum(array(error))
errors.append(error)
combined_errors.append(combined_error)
best_idx = argmin(combined_errors)

if self.use_units:
norm_dim = get_dimensions(normalization[0])**2
error_dim = self.output_dim[0]**2*norm_dim
for output_dim, norm in zip(self.output_dim[1:], normalization[1:]):
norm_dim = get_dimensions(norm)**2
other_dim = output_dim**2*norm_dim
norm_dim = get_dimensions(normalization[0]) ** 2
error_dim = self.output_dim[0] ** 2 * norm_dim
for output_dim, norm in zip(self.output_dim[1:],
normalization[1:]):
norm_dim = get_dimensions(norm) ** 2
other_dim = output_dim ** 2 * norm_dim
fail_for_dimension_mismatch(error_dim, other_dim,
error_message='The error terms have mismatching '
'units.')
all_errors = Quantity(combined_errors, dim=error_dim)
params = {p: Quantity(val, dim=self.model[p].dim)
for p, val in params.items()}
for p, val in zip(self.parameter_names, x)}
best_raw_error_normed = tuple([Quantity(raw_error,
dim=output_dim**2*get_dimensions(norm)**2)
dim=output_dim ** 2 * get_dimensions(
norm) ** 2)
for raw_error, output_dim, norm
in zip(errors[best_idx],
self.output_dim,
normalization)])
best_raw_error = tuple([Quantity(raw_error / norm**2,
dim=output_dim**2)
best_raw_error = tuple([Quantity(raw_error / norm ** 2,
dim=output_dim ** 2)
for raw_error, output_dim, norm
in zip(errors[best_idx],
self.output_dim,
normalization)])
else:
all_errors = array(combined_errors)
params = {p: float(val) for p, val in params.items()}
params = {p: float(val) for p, val
in zip(self.parameter_names, x)}
best_raw_error_normed = errors[best_idx]
best_raw_error = [err / norm**2
for norm, err in zip(errors[best_idx], normalization)]
best_raw_error = [err / norm ** 2
for norm, err in
zip(errors[best_idx], normalization)]
tested_parameters.append(params)

best_error = all_errors[best_idx]
best_params = tested_parameters[best_idx]
additional_info = {'objective_errors': best_raw_error,
'objective_errors_normalized': best_raw_error_normed,
'output_var': self.output_var}
return callback_func(params, errors,
best_params, best_error, iter, additional_info)
callback_func(params, errors,
best_params, best_error, n_evaluations[0], additional_info)
n_evaluations[0] += 1
return array(hstack(one_residual))

assert 'Dfun' not in kwds
def _calc_gradient(params):
residuals = []
for name in self.parameter_names:
one_residual = []
for out_var, t_s_steps, t_w, norm in zip(self.output_var,
t_start_steps,
t_weights,
normalization):
trace = getattr(self.simulator.statemonitor,
f'S_{out_var}_{name}_')
if t_w is None:
residual = trace[:, t_s_steps:]
else:
residual = trace * sqrt(t_w)
one_residual.append((residual*norm).flatten())
residuals.append(array(hstack(one_residual)))
gradient = array(residuals)
return gradient.T

assert 'jac' not in kwds
if calc_gradient:
kwds.update({'Dfun': _calc_gradient})
if 'iter_cb' in kwds:
# Use the given callback but raise a warning if callback is not
# set to None
if callback is not None:
logger.warn('The iter_cb keyword has been specified together '
f'with callback={callback!r}. Only the iter_cb '
'callback will be used. Use the standard '
'callback mechanism or set callback=None to '
'remove this warning.',
name_suffix='iter_cb_callback')
iter_cb = kwds.pop('iter_cb')
else:
iter_cb = _callback_wrapper

# Translate arguments for newer lmfit versions
if LooseVersion(lmfit.__version__) >= '1.0.1':
max_nfev = kwds.pop('max_nfev', None)
args = ['maxfev', 'maxiter']
for arg in args:
if arg in kwds:
if max_nfev is not None:
raise ValueError('Only specifiy a single \'max_nfev\' argument.')
max_nfev = kwds.pop(arg)
if max_nfev:
kwds['max_nfev'] = max_nfev

result = lmfit.minimize(_calc_error, parameters,
iter_cb=iter_cb,
**kwds)
kwds.update({'jac': _calc_gradient})

if 'maxfev' in kwds:
if 'max_nfev' in kwds:
raise ValueError("Cannot provide both 'maxfev' and 'max_nfev' "
"as arguments. Please only provide "
"'max_nfev'.")
logger.warn("The 'maxfev' argument is deprecated, please use "
"'max_nfev' instead.", name_suffix='deprecated_maxfev',
once=True)
kwds['max_nfev'] = kwds.pop('maxfev')

result = least_squares(_calc_error, x0,
bounds=(min_bounds, max_bounds),
**kwds)

if self.use_units:
param_dict = {p: Quantity(float(val), dim=self.model[p].dim)
for p, val in result.params.items()}
for p, val in zip(self.parameter_names, result.x)}
else:
param_dict = {p: float(val)
for p, val in result.params.items()}
for p, val in zip(self.parameter_names, result.x)}

return param_dict, result
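
Two details of the new control flow are worth spelling out: scipy.optimize.least_squares has no per-iteration callback, so the bookkeeping that lmfit's iter_cb used to receive now runs inside _calc_error on every residual evaluation; and a jac callable must return an array of shape (n_residuals, n_parameters), which is why _calc_gradient ends with gradient.T. A self-contained sketch of that contract (toy functions, not the fitter's own):

    import numpy as np
    from scipy.optimize import least_squares

    def residuals(x):
        return np.array([x[0] - 3.0, 2.0 * (x[1] + 1.0)])

    def jacobian(x):
        # One row per residual, one column per parameter:
        # shape (n_residuals, n_parameters).
        return np.array([[1.0, 0.0],
                         [0.0, 2.0]])

    result = least_squares(residuals, x0=[0.0, 0.0], jac=jacobian)
    print(result.x)  # approximately [3.0, -1.0]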

(Diffs for the remaining 3 changed files are not shown.)

