From f5aedd559bffb34cc43c09011cb27e4d66290fb5 Mon Sep 17 00:00:00 2001
From: reneeotten
Date: Mon, 5 Dec 2016 13:35:31 -0500
Subject: [PATCH 1/6] Add 'brute_step' attribute to Parameter class.

---
 lmfit/parameter.py | 50 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 0780f97e6..872a27173 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -109,6 +109,7 @@ def __deepcopy__(self, memo):
                           min=par.min, max=par.max)
         param.vary = par.vary
+        param.brute_step = par.brute_step
         param.stderr = par.stderr
         param.correl = par.correl
         param.init_value = par.init_value
@@ -234,7 +235,8 @@ def pretty_repr(self, oneline=False):
         return s

     def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
-                     columns=['value', 'min', 'max', 'stderr', 'vary', 'expr']):
+                     columns=['value', 'min', 'max', 'stderr', 'vary', 'expr',
+                              'brute_step']):
         """Pretty-print parameters data.

         Parameters
@@ -262,7 +264,8 @@ def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
         print(title.format(*allcols, name_len=name_len, n=colwidth).title())
         numstyle = '{%s:>{n}.{p}{f}}'  # format for numeric columns
         otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
-                           vary='{vary!s:>{n}}', expr='{expr!s:>{n}}')
+                           vary='{vary!s:>{n}}', expr='{expr!s:>{n}}',
+                           brute_step='{brute_step!s:>{n}}')
         line = ' '.join([otherstyles.get(k, numstyle % k) for k in allcols])
         for name, values in sorted(self.items()):
             pvalues = {k: getattr(values, k) for k in columns}
@@ -271,10 +274,14 @@ def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
             if 'stderr' in columns and pvalues['stderr'] is not None:
                 pvalues['stderr'] = (numstyle % '').format(
                     pvalues['stderr'], n=colwidth, p=precision, f=fmt)
+            if 'brute_step' in columns and pvalues['brute_step'] is not None:
+                pvalues['brute_step'] = (numstyle % '').format(
+                    pvalues['brute_step'], n=colwidth, p=precision, f=fmt)
             print(line.format(name_len=name_len, n=colwidth, p=precision,
                               f=fmt, **pvalues))

-    def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None):
+    def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None,
+            brute_step=None):
         """
         Convenience function for adding a Parameter:
@@ -290,7 +297,8 @@ def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None):
             self.__setitem__(name.name, name)
         else:
             self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
-                                             min=min, max=max, expr=expr))
+                                             min=min, max=max, expr=expr,
+                                             brute_step=brute_step))

     def add_many(self, *parlist):
         """
@@ -303,15 +311,15 @@
        is a sequence of tuples, then each tuple must contain at least the name.
The order in each tuple is the following: - name, value, vary, min, max, expr + name, value, vary, min, max, expr, brute_step Example ------- p = Parameters() # add a sequence of tuples - p.add_many( (name1, val1, True, None, None, None), - (name2, val2, True, 0.0, None, None), - (name3, val3, False, None, None, None), + p.add_many( (name1, val1, True, None, None, None, None), + (name2, val2, True, 0.0, None, None, None), + (name3, val3, False, None, None, None, None), (name4, val4)) # add a sequence of Parameter @@ -440,7 +448,7 @@ class Parameter(object): Of the form `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}` """ def __init__(self, name=None, value=None, vary=True, - min=-inf, max=inf, expr=None): + min=-inf, max=inf, expr=None, brute_step=None): """ Parameters ---------- @@ -456,6 +464,8 @@ def __init__(self, name=None, value=None, vary=True, Upper bound for value (None or inf means no upper bound). expr : str, optional Mathematical expression used to constrain the value during the fit. + brute_step : float, optional + Step size for grid points in brute force method. """ self.name = name self._val = value @@ -463,6 +473,7 @@ def __init__(self, name=None, value=None, vary=True, self.init_value = value self.min = min self.max = max + self.brute_step = brute_step self.vary = vary self._expr = expr self._expr_ast = None @@ -474,7 +485,8 @@ def __init__(self, name=None, value=None, vary=True, self.from_internal = lambda val: val self._init_bounds() - def set(self, value=None, vary=None, min=None, max=None, expr=None): + def set(self, value=None, vary=None, min=None, max=None, expr=None, + brute_step=None): """ Set or update Parameter attributes. @@ -491,6 +503,9 @@ def set(self, value=None, vary=None, min=None, max=None, expr=None): expr : str, optional Mathematical expression used to constrain the value during the fit. To remove a constraint you must supply an empty string. + brute_step : float, optional + Step size for grid points in brute force method. To remove the step + size you must supply an emtpy string. """ if value is not None: @@ -511,6 +526,12 @@ def set(self, value=None, vary=None, min=None, max=None, expr=None): if expr is not None: self.__set_expression(expr) + if brute_step is not None: + if brute_step == '': + self.brute_step = None + else: + self.brute_step = brute_step + def _init_bounds(self): """make sure initial bounds are self-consistent""" # _val is None means - infinity. 
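Taken together, the hunks above let a `brute_step` be supplied when a Parameter is created, changed later through `set()`, and removed again. A minimal sketch of the resulting API (the parameter names and values are illustrative; removing the step via 0 follows the semantics that PATCH 2/6 below settles on):

    from lmfit import Parameters

    params = Parameters()
    # brute_step can be passed directly to add() ...
    params.add('amp', value=5.0, min=0.0, brute_step=0.25)
    # ... or attached to / removed from an existing Parameter via set()
    params.add('decay', value=0.05)
    params['decay'].set(brute_step=0.005)
    params['decay'].set(brute_step=0)  # 0 ("zero") removes the step size again
    # 'brute_step' now shows up as a column in pretty_print()
    params.pretty_print()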
@@ -535,12 +556,13 @@ def _init_bounds(self):
     def __getstate__(self):
         """get state for pickle"""
         return (self.name, self.value, self.vary, self.expr, self.min,
-                self.max, self.stderr, self.correl, self.init_value)
+                self.max, self.brute_step, self.stderr, self.correl,
+                self.init_value)

     def __setstate__(self, state):
         """set state for pickle"""
-        (self.name, self.value, self.vary, self.expr, self.min,
-         self.max, self.stderr, self.correl, self.init_value) = state
+        (self.name, self.value, self.vary, self.expr, self.min, self.max,
+         self.brute_step, self.stderr, self.correl, self.init_value) = state
         self._expr_ast = None
         self._expr_eval = None
         self._expr_deps = []
@@ -560,6 +582,8 @@ def __repr__(self):
             s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
         if self._expr is not None:
             s.append("expr='%s'" % self.expr)
+        if self.brute_step is not None:
+            s.append("brute_step=%s" % (self.brute_step))
         return "<Parameter %s>" % ', '.join(s)

     def setup_bounds(self):

From 4d811edf03322fcac14ced21f39f112372ac6187 Mon Sep 17 00:00:00 2001
From: reneeotten
Date: Mon, 5 Dec 2016 14:11:15 -0500
Subject: [PATCH 2/6] Add brute method that wraps around scipy.optimize.brute.

* enh: handling of infinite bounds
  As long as brute_step and at least an upper bound, lower bound or value
  is specified, the brute method will work.

* add: attributes to MinimizerResult
  Output from scipy.optimize.brute (x0, fval, grid, Jout) is added as
  result.brute_x0, result.brute_fval, result.brute_grid and
  result.brute_Jout. Attributes 'candidates' and 'show_candidates'
  contain/show a number of best solutions from the brute force method.

* add: penalty_brute() -- brute method requires the penalty function, but
  with apply_bounds_transformation=False

* fix: __residual() -- fvars should always be an array
---
 lmfit/minimizer.py | 157 ++++++++++++++++++++++++++++++++++++++++-
 lmfit/parameter.py |   4 +-
 2 files changed, 158 insertions(+), 3 deletions(-)

diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 46c6487c3..fb74ea01a 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -11,6 +11,7 @@
 """
+from collections import namedtuple
 from copy import deepcopy
 import numpy as np
 from numpy import (dot, eye, ndarray, ones_like,
@@ -33,6 +34,7 @@
 from scipy.optimize import leastsq as scipy_leastsq
 from scipy.optimize import minimize as scipy_minimize
+from scipy.optimize import brute as scipy_brute

 # differential_evolution is only present in scipy >= 0.15
 try:
@@ -257,6 +259,16 @@ def flatchain(self):
         else:
             return None

+    @property
+    def show_candidates(self):
+        """
+        A pretty_print() representation of the candidates from the brute force
+        method.
+        """
+        if hasattr(self, 'candidates'):
+            for i, candidate in enumerate(self.candidates):
+                print("\nCandidate # {}, chisqr = {:.3f}".format(i, candidate.score))
+                candidate.params.pretty_print()

 class Minimizer(object):
     """A general minimizer for curve fitting and optimization.
@@ -396,6 +408,9 @@ def __residual(self, fvars, apply_bounds_transformation=True):
             return None
         params = self.result.params

+        if fvars.shape == ():
+            fvars = fvars.reshape((1,))
+
         if apply_bounds_transformation:
             for name, val in zip(self.result.var_names, fvars):
                 params[name].value = params[name].from_internal(val)
@@ -471,6 +486,27 @@ def penalty(self, fvars):
             r = r.sum()
         return r

+    def penalty_brute(self, fvars):
+        """
+        Penalty function for brute force method.
+
+        Parameters
+        ----------
+        fvars : numpy.ndarray
+            Array of values for the variable parameters.
+
+        Returns
+        -------
+        r : float
+            The evaluated user-supplied objective function. If the
+            objective function is an array of size greater than 1, return the
+            array sum-of-squares.
+        """
+        r = self.__residual(fvars, apply_bounds_transformation=False)
+        if isinstance(r, ndarray) and r.size > 1:
+            r = (r*r).sum()
+        return r
+
     def prepare_fit(self, params=None):
         """
         Prepares parameters for fitting, return array of initial values.
@@ -1294,6 +1330,121 @@ def leastsq(self, params=None, **kws):
             np.seterr(**orig_warn_settings)
         return result

+    def brute(self, params=None, Ns=20, keep=50):
+        """
+        Use the ``brute force`` method, :scipydoc:`scipy.optimize.brute`,
+        to find the global minimum of a function.
+
+        It assumes that the input Parameters have been initialized, and a
+        function to minimize has been properly set up.
+
+        Parameters
+        ----------
+        params : :class:`lmfit.parameter.Parameters` object, optional
+            Contains the Parameters for the model; if None, then the
+            Parameters used to initialize the Minimizer object are used.
+        Ns : int, optional
+            Number of grid points along the axes, if not otherwise specified.
+        keep : int, optional
+            Number of best candidates from the brute force method that are
+            stored in the .candidates attribute. If 'all', then all grid
+            points from brute are stored as candidates.
+
+        The following default minimizer options are passed to
+        :scipydoc:`scipy.optimize.brute` and cannot be changed:
+        full_output=1, finish=None, disp=False
+
+        Returns
+        -------
+        :class:`MinimizerResult`
+            Object containing the parameters from the brute force method.
+            The return values (x0, fval, grid, Jout) from
+            `scipy.optimize.brute` are stored as the ``brute_x0``,
+            ``brute_fval``, ``brute_grid`` and ``brute_Jout`` attributes.
+            The `MinimizerResult` also contains the ``candidates`` and
+            ``show_candidates`` attributes. The ``candidates`` attribute
+            contains the parameters and chisqr from the brute force method as
+            a namedtuple, ('Candidate', ['params', 'score']), sorted on the
+            (lowest) chisqr value. To access the values for a particular
+            candidate use `result.candidates[#].params` or
+            `result.candidates[#].score`, where a lower # represents a better
+            candidate. The ``show_candidates`` attribute will show the
+            candidates using the `pretty_print` method.
+ """ + result = self.prepare_fit(params=params) + result.method = 'brute' + + brute_kws = dict(full_output=1, finish=None, disp=False) + + varying = np.asarray([par.vary for par in self.params.values()]) + replace_none = lambda x, sign: sign*np.inf if x is None else x + lower_bounds = np.asarray([replace_none(i.min, -1) for i in + self.params.values()])[varying] + upper_bounds = np.asarray([replace_none(i.max, 1) for i in + self.params.values()])[varying] + value = np.asarray([i.value for i in self.params.values()])[varying] + stepsize = np.asarray([i.brute_step for i in self.params.values()])[varying] + + ranges = [] + for i, step in enumerate(stepsize): + if np.all(np.isfinite([lower_bounds[i], upper_bounds[i]])): + # lower AND upper bounds are specified (brute_step optional) + par_range = ((lower_bounds[i], upper_bounds[i], step) + if step else (lower_bounds[i], upper_bounds[i])) + elif np.isfinite(lower_bounds[i]) and step: + # lower bound AND brute_step are specified + par_range = (lower_bounds[i], lower_bounds[i] + Ns*step, step) + elif np.isfinite(upper_bounds[i]) and step: + # upper bound AND brute_step are specified + par_range = (upper_bounds[i] - Ns*step, upper_bounds[i], step) + elif np.isfinite(value[i]) and step: + # no bounds, but an initial value is specified + par_range = (value[i] - (Ns//2)*step, value[i] + (Ns//2)*step, + step) + else: + raise ValueError('Not enough information provided for the brute ' + 'force method. Please specify bounds or at ' + 'least an initial value and brute_step for ' + 'parameter "{}".'.format(result.var_names[i])) + ranges.append(par_range) + + ret = scipy_brute(self.penalty_brute, tuple(ranges), Ns=Ns, **brute_kws) + + result.brute_x0 = ret[0] + result.brute_fval = ret[1] + result.brute_grid = ret[2] + result.brute_Jout = ret[3] + + # sort the results of brute and populate .candidates attribute + grid_score = ret[3].ravel() # chisqr + grid_points = [par.ravel() for par in ret[2]] + + if len(result.var_names) == 1: + grid_result = np.array([res for res in zip(zip(grid_points), grid_score)], + dtype=[('par', 'O'), ('score', 'float64')]) + else: + grid_result = np.array([res for res in zip(zip(*grid_points), grid_score)], + dtype=[('par', 'O'), ('score', 'float64')]) + grid_result_sorted = grid_result[grid_result.argsort(order='score')] + + result.candidates = [] + candidate = namedtuple('Candidate', ['params', 'score']) + + if keep == 'all': + keep_candidates = len(grid_result_sorted) + else: + keep_candidates = min(len(grid_result_sorted), keep) + + for data in grid_result_sorted[:keep_candidates]: + pars = deepcopy(self.params) + for i, par in enumerate(result.var_names): + pars[par].value = data[0][i] + result.candidates.append(candidate(params=pars, score=data[1])) + + result.params = result.candidates[0].params + result.chisqr = ret[1] + + return result + def minimize(self, method='leastsq', params=None, **kws): """ Perform the minimization. @@ -1318,6 +1469,8 @@ def minimize(self, method='leastsq', params=None, **kws): - `'dogleg'`: Dogleg - `'slsqp'`: Sequential Linear Squares Programming - `'differential_evolution'`: differential evolution + - `'brute'`: brute force method. + Uses `scipy.optimize.brute`. For more details on the fitting methods please refer to the `scipy docs `__. 
@@ -1350,6 +1503,8 @@ def minimize(self, method='leastsq', params=None, **kws): function = self.leastsq elif user_method.startswith('least_s'): function = self.least_squares + elif user_method.startswith('brute'): + function = self.brute else: function = self.scalar_minimize for key, val in SCALAR_METHODS.items(): @@ -1653,7 +1808,7 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, and is equivalent to:: fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws, - iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws) + iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws) fitter.minimize(method=method) """ diff --git a/lmfit/parameter.py b/lmfit/parameter.py index 872a27173..b0c88a52f 100644 --- a/lmfit/parameter.py +++ b/lmfit/parameter.py @@ -505,7 +505,7 @@ def set(self, value=None, vary=None, min=None, max=None, expr=None, To remove a constraint you must supply an empty string. brute_step : float, optional Step size for grid points in brute force method. To remove the step - size you must supply an emtpy string. + size you must supply 0 ("zero"). """ if value is not None: @@ -527,7 +527,7 @@ def set(self, value=None, vary=None, min=None, max=None, expr=None, self.__set_expression(expr) if brute_step is not None: - if brute_step == '': + if brute_step == 0.0: self.brute_step = None else: self.brute_step = brute_step From fae5939e9202c7c3f0c8ffeed12ef52a0cea5b11 Mon Sep 17 00:00:00 2001 From: reneeotten Date: Mon, 12 Dec 2016 12:46:36 -0500 Subject: [PATCH 3/6] Add an example demonstrating the features of brute. --- examples/lmfit_brute.py | 208 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 examples/lmfit_brute.py diff --git a/examples/lmfit_brute.py b/examples/lmfit_brute.py new file mode 100644 index 000000000..56ffb375f --- /dev/null +++ b/examples/lmfit_brute.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python +# + +from __future__ import print_function +import copy + +import numpy as np + +from lmfit import minimize, Minimizer, Parameters + +# EXAMPLE 1 # +# taken from: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html + +# create a set of Parameters +params = Parameters() +params.add_many( + ('a', 2, False, None, None, None), + ('b', 3, False, None, None, None), + ('c', 7, False, None, None, None), + ('d', 8, False, None, None, None), + ('e', 9, False, None, None, None), + ('f', 10, False, None, None, None), + ('g', 44, False, None, None, None), + ('h', -1, False, None, None, None), + ('i', 2, False, None, None, None), + ('j', 26, False, None, None, None), + ('k', 1, False, None, None, None), + ('l', -2, False, None, None, None), + ('scale', 0.5, False, None, None, None), + ('x', 0.0, True, -4.0, 4.0, None, 0.25), + ('y', 0.0, True, -4.0, 4.0, None, 0.25), + ) + +# define functions +def f1(p): + par = p.valuesdict() + return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] + + par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] + par['f']) + +def f2(p): + par = p.valuesdict() + return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 + + (par['y']-par['i'])**2) / par['scale'])) + +def f3(p): + par = p.valuesdict() + return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 + + (par['y']-par['l'])**2) / par['scale'])) + +# define objective function: returns the array to be minimized +def f(params): + return f1(params) + f2(params) + f3(params) + + +# 1. 
show the effect of 'Ns': finite bounds for varying parameters ('x' and 'y'),
# and brute_step = 0.25
fitter = Minimizer(f, params)
result = fitter.minimize(method='brute', Ns=10, keep=5)
grid_x = np.unique([par.ravel() for par in result.brute_grid][0])
grid_y = np.unique([par.ravel() for par in result.brute_grid][1])

print("==========================================="
      "\nExample 1, taken from scipy.optimize.brute:\n"
      "===========================================\n\n"
      "Varying parameters with finite bounds and brute_step = 0.25:")
print(" {}\n {}".format(params['x'], params['y']))
print("\nUsing the brute method with"
      "\n\tresult = fitter.minimize(method='brute', Ns=10, keep=5)"
      "\nwill generate a 2-dimensional grid, where 'x' and 'y' vary between"
      "\n'min' and 'max' (exclusive), spaced by 'brute_step':"
      "\n\nx: {}\ny: {}".format(grid_x, grid_y))
print("\nThe objective function is evaluated on this grid, and the raw output"
      "\nfrom scipy.optimize.brute is stored in the brute_x0, brute_fval,"
      "\nbrute_grid and brute_Jout attributes.")
print("\nFor further use with lmfit, a number (determined by 'keep') of the"
      "\nbest-scoring results from the brute force method is converted to"
      "\nlmfit.Parameters objects and stored in the .candidates attribute.")
print("\nThe optimal result from the brute force method is:")
print("result.candidates[0].score = {:.3f} (value of objective function)".\
      format(result.candidates[0].score))
print("result.candidates[0].params = ")
result.candidates[0].params.pretty_print()


## EXAMPLE 2: see examples/doc_basic.py

# create data to be fitted
x = np.linspace(0, 15, 301)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
        np.random.normal(size=len(x), scale=0.2))

# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
    """ model decaying sine wave, subtract data"""
    amp = params['amp']
    shift = params['shift']
    omega = params['omega']
    decay = params['decay']
    model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
    return model - data

# create a set of Parameters
params = Parameters()
params.add('amp', value=7, min=2.5)
params.add('decay', value=0.05)
params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
params.add('omega', value=3, max=5)

# perform grid search with brute force method
# add brute_step for parameters that do not have both bounds specified
params['amp'].set(brute_step=0.25)
params['decay'].set(brute_step=0.005)
params['omega'].set(brute_step=0.25)

print("\n\n===================================================================="
      "\nExample 2: demonstrate how the grid points are determined and how to"
      "\nperform a leastsq minimization on the best candidates."
      "\n====================================================================\n")

print("Initial parameters:")
params.pretty_print()

fitter = Minimizer(fcn2min, params, fcn_args=(x, data))

print("\nUsing the brute method with"
      "\n\tresult_brute = fitter.minimize(method='brute', Ns=25, keep=25)"
      "\nwill generate a grid, where the parameter ranges depend on the"
      "\nsettings for value/min/max/brute_step as shown below.\n")

shift_grid = \
np.array([-1.57079633, -1.43989663, -1.30899694, -1.17809725, -1.04719755,
          -0.91629786, -0.78539816, -0.65449847, -0.52359878, -0.39269908,
          -0.26179939, -0.13089969,  0.        ,  0.13089969,  0.26179939,
           0.39269908,  0.52359878,  0.65449847,  0.78539816,  0.91629786,
           1.04719755,  1.17809725,  1.30899694,  1.43989663,  1.57079633])
amp_grid = \
np.array([ 2.5 ,  2.75,  3.  ,  3.25,  3.5 ,  3.75,  4.  ,  4.25,  4.5 ,
           4.75,  5.  ,  5.25,  5.5 ,  5.75,  6.  ,  6.25,  6.5 ,  6.75,
           7.  ,  7.25,  7.5 ,  7.75,  8.  ,  8.25,  8.5 ])
omega_grid = \
np.array([-1.25, -1.  , -0.75, -0.5 , -0.25,  0.  ,  0.25,  0.5 ,  0.75,
           1.  ,  1.25,  1.5 ,  1.75,  2.  ,  2.25,  2.5 ,  2.75,  3.  ,
           3.25,  3.5 ,  3.75,  4.  ,  4.25,  4.5 ,  4.75])
decay_grid = \
np.array([ -1.00000000e-02,  -5.00000000e-03,   5.20417043e-18,
            5.00000000e-03,   1.00000000e-02,   1.50000000e-02,
            2.00000000e-02,   2.50000000e-02,   3.00000000e-02,
            3.50000000e-02,   4.00000000e-02,   4.50000000e-02,
            5.00000000e-02,   5.50000000e-02,   6.00000000e-02,
            6.50000000e-02,   7.00000000e-02,   7.50000000e-02,
            8.00000000e-02,   8.50000000e-02,   9.00000000e-02,
            9.50000000e-02,   1.00000000e-01,   1.05000000e-01])

print("* amp, lower bound and brute_step: 25 (Ns) points between 'min' ({})"
      "\nand 'min + Ns*brute_step' ({}, not inclusive)."
      "\namp_grid = {}\n".format(params['amp'].min, params['amp'].min +
                                 25*params['amp'].brute_step, amp_grid))
print("* decay, no bounds, but value and brute_step: 25 (Ns) points between"
      "\n'value - (Ns//2)*brute_step' ({:.2f}) and 'value + (Ns//2)*brute_step' ({:.2f}, not inclusive)."
      "\ndecay_grid = {}\n".format(params['decay'].value - (25//2) *
                                   params['decay'].brute_step,
                                   params['decay'].value + (25//2) *
                                   params['decay'].brute_step, decay_grid))
print("* shift, finite bounds and no brute_step: 25 (Ns) points between 'min' "
      "and 'max'.\nshift_grid = {}\n".format(shift_grid))
print("* omega, upper bound and brute_step: 25 (Ns) points between 'max - "
      "Ns*brute_step' ({}) \nand 'max' ({}, not inclusive)."
      "\nomega_grid = {}\n".format(params['omega'].max -
                                   25*params['omega'].brute_step,
                                   params['omega'].max, omega_grid))

result_brute = fitter.minimize(method='brute', Ns=25, keep=25)

print("Since we used keep=25, the twenty-five best-scoring candidates from "
      "the brute force \nmethod are converted to lmfit.Parameters objects "
      "and stored in the .candidates attribute. \nNow we can perform a leastsq "
      "minimization on these candidates as follows:\n")
print("\tbest_result = copy.deepcopy(result_brute)"
      "\n\tfor candidate in result_brute.candidates:"
      "\n\t    trial = fitter.minimize(method='leastsq', params=candidate.params)"
      "\n\t    if trial.chisqr < best_result.chisqr:"
      "\n\t        best_result = trial")

best_result = copy.deepcopy(result_brute)
for candidate in result_brute.candidates:
    trial = fitter.minimize(method='leastsq', params=candidate.params)
    if trial.chisqr < best_result.chisqr:
        best_result = trial

print("\nBest candidate from brute force method: chi-sqr = {:.3f}".\
      format(result_brute.chisqr))
result_brute.params.pretty_print()

print("\nBest result after the leastsq minimization: chi-sqr = {:.3f}".\
      format(best_result.chisqr))
best_result.params.pretty_print()

try:
    import pylab
    pylab.plot(x, data, 'k+')
    pylab.plot(x, data + fcn2min(result_brute.params, x, data), 'b',
               label='brute: best candidate #1')
    pylab.plot(x, data + fcn2min(result_brute.candidates[24].params, x, data),
               'g', label='brute: candidate #25')
    pylab.plot(x, data + best_result.residual, 'r--',
               label='optimal leastsq result')
    pylab.legend(loc='best')
    pylab.show()
except ImportError:
    pass
#
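Alongside the script above, a short sketch of how the result attributes it exercises can be inspected interactively (assuming the `fitter` for example 2 above):

    result = fitter.minimize(method='brute', Ns=25, keep=25)

    # raw scipy.optimize.brute output
    print(result.brute_x0)    # grid point with the lowest function value
    print(result.brute_fval)  # function value at that point
    print(result.brute_grid.shape, result.brute_Jout.shape)

    # ranked candidates as lmfit.Parameters objects
    for candidate in result.candidates[:3]:
        print(candidate.score)
        candidate.params.pretty_print()

    # or simply use the convenience property, which prints the same
    result.show_candidates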
From b13ec46ac92e430acfb016564eacce2e165fe821 Mon Sep 17 00:00:00 2001
From: reneeotten
Date: Sun, 29 Jan 2017 15:42:40 -0500
Subject: [PATCH 4/6] Add new tests for the brute force method.

Test added (test_brute_method.py) and included testing for 'brute_step'
in test_params_set.py
---
 tests/test_brute_method.py | 231 +++++++++++++++++++++++++++++++++++++
 tests/test_params_set.py   |  32 +++++
 2 files changed, 263 insertions(+)
 create mode 100644 tests/test_brute_method.py

diff --git a/tests/test_brute_method.py b/tests/test_brute_method.py
new file mode 100644
index 000000000..3d2ab862a
--- /dev/null
+++ b/tests/test_brute_method.py
@@ -0,0 +1,231 @@
+from __future__ import print_function
+import numpy as np
+from numpy.testing import (assert_, decorators, assert_raises,
+                           assert_almost_equal, assert_equal,
+                           assert_allclose)
+from scipy import optimize
+import lmfit
+
+
+# use the example problem described in the scipy documentation:
+# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html
+
+### setup for scipy-brute optimization ###
+params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
+
+def f1(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
+
+def f2(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
+
+def f3(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
+
+def f(z, *params):
+    return f1(z, *params) + f2(z, *params) + f3(z, *params)
+### setup for scipy-brute optimization ###
+
+### setup for lmfit-brute optimization ###
+params_lmfit = lmfit.Parameters()
+params_lmfit.add_many(
+        ('a', 2, False, None, None, None),
+        ('b', 3, False, None, None, None),
+        ('c', 7, False, None, None, None),
+        ('d', 8, False, None, None, None),
+        ('e', 9, False, None, None, None),
+        ('f', 10, False, None, None, None),
+        ('g', 44, False, None, None, None),
+        ('h', -1, False, None, None, None),
+        ('i', 2, False, None, None, None),
+        ('j', 26, False, None, None, None),
+        ('k', 1, False, None, None, None),
+        ('l', -2, False, None, None, None),
+        ('scale', 0.5, False, None, None, None),
+        ('x', -4.0, True, -4.0, 4.0, None, None),
+        ('y', -2.0, True, -2.0, 2.0, None, None),
+        )
+
+def f1_lmfit(p):
+    par = p.valuesdict()
+    return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] +
+            par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] +
+            par['f'])
+
+def f2_lmfit(p):
+    par = p.valuesdict()
+    return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
+                                   (par['y']-par['i'])**2) / par['scale']))
+
+def f3_lmfit(p):
+    par = p.valuesdict()
+    return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
+                                   (par['y']-par['l'])**2) / par['scale']))
+
+def f_lmfit(params_lmfit):
+    return f1_lmfit(params_lmfit) + f2_lmfit(params_lmfit) + f3_lmfit(params_lmfit)
+### setup for lmfit-brute optimization ###
+
+
+def test_brute_lmfit_vs_scipy():
+    # The tests below are to make sure that the implementation of the brute
+    # method in lmfit gives identical results to scipy.optimize.brute, when
+    # using finite bounds for all varying parameters.
+
+    # TEST 1: using bounds, with (default) Ns=20 and no stepsize specified
+    assert(not params_lmfit['x'].brute_step)  # brute_step for x == None
+    assert(not params_lmfit['y'].brute_step)  # brute_step for y == None
+
+    rranges = ((-4, 4), (-2, 2))
+    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=20,
+                              finish=None)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=20)
+
+    assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True)  # grid identical
+    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True)  # function values on grid identical
+    assert_equal(resbrute[0][0], resbrute_lmfit.brute_x0[0], verbose=True)  # best fit x value identical
+    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True)  # best fit x value stored correctly
+    assert_equal(resbrute[0][1], resbrute_lmfit.brute_x0[1], verbose=True)  # best fit y value identical
+    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True)  # best fit y value stored correctly
+    assert_equal(resbrute[1], resbrute_lmfit.brute_fval, verbose=True)  # best fit function value identical
+    assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True)  # best fit function value stored correctly
+
+    # TEST 2: using bounds, setting Ns=40 and no stepsize specified
+    assert(not params_lmfit['x'].brute_step)  # brute_step for x == None
+    assert(not params_lmfit['y'].brute_step)  # brute_step for y == None
+
+    rranges = ((-4, 4), (-2, 2))
+    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=40,
+                              finish=None)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=40)
+
+    assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True)  # grid identical
+    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True)  # function values on grid identical
+    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True)  # best fit x value identical
+    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True)  # best fit y value identical
+    assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True)  # best fit function value identical
+
+    # TEST 3: using bounds and specifying stepsize for both parameters
+    params_lmfit['x'].set(brute_step=0.25)
+    params_lmfit['y'].set(brute_step=0.25)
+    assert_equal(params_lmfit['x'].brute_step, 0.25, verbose=True)
+    assert_equal(params_lmfit['y'].brute_step, 0.25, verbose=True)
+
+    rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25))
+    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=20,
+                              finish=None)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute')
+
+    assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True)  # grid identical
+    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True)  # function values on grid identical
+    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True)  # best fit x value identical
+    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True)  # best fit y value identical
+    assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True)  # best fit function value identical
+
+    # TEST 4: using bounds, Ns=10, and specifying stepsize for parameter 'x'
+    params_lmfit['x'].set(brute_step=0.15)
+    params_lmfit['y'].set(brute_step=0)  # brute_step for y == None
+    assert_equal(params_lmfit['x'].brute_step, 0.15, verbose=True)
+    assert(not params_lmfit['y'].brute_step)
+
+    rranges = (slice(-4, 4, 0.15), (-2, 2))
+    resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=10,
+                              finish=None)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=10, keep='all')
+
+    assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True)  # grid identical
+    assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True)  # function values on grid identical
+    assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True)  # best fit x value identical
+    assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True)  # best fit y value identical
+    assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True)  # best fit function value identical
+
+
+def test_brute():
+    # The tests below are to make sure that the implementation of the brute
+    # method in lmfit works as intended.
+
+    # restore original settings for parameters 'x' and 'y'
+    params_lmfit.add_many(
+        ('x', -4.0, True, -4.0, 4.0, None, None),
+        ('y', -2.0, True, -2.0, 2.0, None, None))
+
+    # TEST 1: only upper bound and brute_step specified, using default Ns=20
+    Ns = 20
+    params_lmfit['x'].set(min=-np.inf)
+    params_lmfit['x'].set(brute_step=0.25)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute')
+    grid_x_expected = np.linspace(params_lmfit['x'].max - Ns*params_lmfit['x'].brute_step,
+                                  params_lmfit['x'].max, Ns, False)
+    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+    grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].max, Ns)
+    assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+    # TEST 2: only lower bound and brute_step specified, using Ns=15
+    Ns = 15
+    params_lmfit['y'].set(max=np.inf)
+    params_lmfit['y'].set(brute_step=0.1)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+    grid_x_expected = np.linspace(params_lmfit['x'].max - Ns*params_lmfit['x'].brute_step,
+                                  params_lmfit['x'].max, Ns, False)
+    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+    grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+    assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+    # TEST 3: only value and brute_step specified, using Ns=15
+    Ns = 15
+    params_lmfit['x'].set(max=np.inf)
+    params_lmfit['x'].set(min=-np.inf)
+    params_lmfit['x'].set(brute_step=0.1)
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+    grid_x_expected = np.linspace(params_lmfit['x'].value - (Ns//2)*params_lmfit['x'].brute_step,
+                                  params_lmfit['x'].value + (Ns//2)*params_lmfit['x'].brute_step, Ns)
+    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+    grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+    assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+    # TEST 3 (repeated): rerun with the same settings to check reproducibility
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+    grid_x_expected = np.linspace(params_lmfit['x'].value - (Ns//2)*params_lmfit['x'].brute_step,
+                                  params_lmfit['x'].value + (Ns//2)*params_lmfit['x'].brute_step, Ns)
+    grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+    assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+    grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+    grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+    assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+    # TEST 4: check for correct functioning of keep argument and candidates attribute
+    params_lmfit.add_many(  # restore original settings for parameters 'x' and 'y'
+        ('x', -4.0, True, -4.0, 4.0, None, None),
+        ('y', -2.0, True, -2.0, 2.0, None, None))
+
+    fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+    resbrute_lmfit = fitter.minimize(method='brute')
+    assert(len(resbrute_lmfit.candidates) == 50)  # default number of stored candidates
+
+    resbrute_lmfit = fitter.minimize(method='brute', keep=10)
+    assert(len(resbrute_lmfit.candidates) == 10)
+
+    assert(isinstance(resbrute_lmfit.candidates[0].params, lmfit.Parameters))
+
+test_brute_lmfit_vs_scipy()
+test_brute()
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
index 1e7deb359..5adbc7721 100644
--- a/tests/test_params_set.py
+++ b/tests/test_params_set.py
@@ -60,6 +60,7 @@ def test_param_set():
     assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
     assert(params['amplitude'].expr == amplitude_expr)
     assert(params['amplitude'].vary == amplitude_vary)
+    assert(not params['amplitude'].brute_step)

     # test for possible regressions of this fix (without 'expr'):
     # the set function should only change the requested attribute(s)
@@ -70,6 +71,7 @@ def test_param_set():
     assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
     assert(params['amplitude'].vary == amplitude_vary)
     assert(params['amplitude'].expr == amplitude_expr)
+    assert(not params['amplitude'].brute_step)

     # set minimum
     params['amplitude'].set(min=10.0)
@@ -79,6 +81,7 @@ def test_param_set():
     assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
     assert(params['amplitude'].vary == amplitude_vary)
     assert(params['amplitude'].expr == amplitude_expr)
+    assert(not params['amplitude'].brute_step)

     # set maximum
     params['amplitude'].set(max=110.0)
@@ -88,6 +91,7 @@ def test_param_set():
     assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
     assert(params['amplitude'].vary == amplitude_vary)
     assert(params['amplitude'].expr == amplitude_expr)
+    assert(not params['amplitude'].brute_step)

     # set vary
     params['amplitude'].set(vary=False)
@@ -97,6 +101,17 @@ def test_param_set():
     assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
     assert(params['amplitude'].vary == False)
     assert(params['amplitude'].expr == amplitude_expr)
+    assert(not params['amplitude'].brute_step)
+
+    # set brute_step
+    params['amplitude'].set(brute_step=0.1)
+    params.update_constraints()
+    assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+    assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
+    assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
+    assert(params['amplitude'].vary == False)
+    assert(params['amplitude'].expr == amplitude_expr)
+    assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)

     # test for possible regressions of this fix for variables WITH 'expr':
     height_value = params['height'].value
@@ -104,6 +119,7 @@ def test_param_set():
     height_max = params['height'].max
     height_vary = params['height'].vary
     height_expr = params['height'].expr
+    height_brute_step = params['height'].brute_step

     # set vary=True should remove expression
     params['height'].set(vary=True)
@@ -113,6 +129,7 @@ def test_param_set():
     assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
     assert(params['height'].vary == True)
     assert(params['height'].expr == None)
+    assert(params['height'].brute_step == height_brute_step)

     # setting an expression should set vary=False
     params['height'].set(expr=height_expr)
@@ -122,6 +139,7 @@ def test_param_set():
     assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
     assert(params['height'].vary == False)
     assert(params['height'].expr == height_expr)
+    assert(params['height'].brute_step == height_brute_step)

     # changing min/max should not remove expression
     params['height'].set(min=0)
@@ -131,8 +149,20 @@ def test_param_set():
     assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
     assert(params['height'].vary == height_vary)
     assert(params['height'].expr == height_expr)
+    assert(params['height'].brute_step == height_brute_step)
+
+    # changing brute_step should not remove expression
+    params['height'].set(brute_step=0.1)
+    params.update_constraints()
+    assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+    assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
+    assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+    assert(params['height'].vary == height_vary)
+    assert(params['height'].expr == height_expr)
+    assert_allclose(params['height'].brute_step, 0.1, 1e-4, 1e-4, '', True)

     # changing the value should remove expression and keep vary=False
+    params['height'].set(brute_step=0)
     params['height'].set(value=10.0)
     params.update_constraints()
     assert_allclose(params['height'].value, 10.0, 1e-4, 1e-4, '', True)
@@ -140,6 +170,7 @@ def test_param_set():
     assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
     assert(params['height'].vary == False)
     assert(params['height'].expr == None)
+    assert(params['height'].brute_step == height_brute_step)

     # passing expr='' should only remove the expression
     params['height'].set(expr=height_expr)  # first restore the original expr
@@ -151,5 +182,6 @@ def test_param_set():
     assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
     assert(params['height'].vary == False)
     assert(params['height'].expr == None)
+    assert(params['height'].brute_step == height_brute_step)

 test_param_set()
From 032f8755ca8b9a39898208fbe7f81207033d5fc9 Mon Sep 17 00:00:00 2001
From: reneeotten
Date: Sun, 29 Jan 2017 21:55:18 -0500
Subject: [PATCH 5/6] Update documentation and docstrings.
- update documentation/docstrings for brute/brute_step - updated outdated documentation/docstrings for set function - update occurences of :scipydoc: - fix mixed tab/space indentation - fix typos - consistency of capitalization and such --- doc/bounds.rst | 4 +- doc/builtin_models.rst | 322 +++++++++++++++++------------------ doc/confidence.rst | 26 +-- doc/constraints.rst | 32 ++-- doc/faq.rst | 2 +- doc/fitting.rst | 155 ++++++++--------- doc/installation.rst | 2 +- doc/intro.rst | 12 +- doc/model.rst | 375 ++++++++++++++++++++--------------------- doc/parameters.rst | 122 ++++++++------ lmfit/confidence.py | 4 +- lmfit/minimizer.py | 133 +++++++++------ lmfit/parameter.py | 24 +-- lmfit/printfuncs.py | 8 +- 14 files changed, 635 insertions(+), 586 deletions(-) diff --git a/doc/bounds.rst b/doc/bounds.rst index 40f839037..9485b311e 100644 --- a/doc/bounds.rst +++ b/doc/bounds.rst @@ -23,14 +23,14 @@ for `MINUIT`_. This is implemented following (and borrowing heavily from) the `leastsqbound`_ from J. J. Helmus. Parameter values are mapped from internally used, freely variable values :math:`P_{\rm internal}` to bounded parameters :math:`P_{\rm bounded}`. When both ``min`` and ``max`` bounds -are specified, the mapping is +are specified, the mapping is: .. math:: :nowrap: \begin{eqnarray*} P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\ - P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2} + P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2} \end{eqnarray*} With only an upper limit ``max`` supplied, but ``min`` left unbounded, the diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst index 7c1e06b0a..b3c7d0571 100644 --- a/doc/builtin_models.rst +++ b/doc/builtin_models.rst @@ -70,7 +70,7 @@ to report full width at half maximum and maximum peak height, respectively. where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to :math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately -:math:`2.3548\sigma` +:math:`2.3548\sigma`. :class:`LorentzianModel` @@ -289,7 +289,7 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 + {\operatorname{erf}}\bigl[ - \frac{\gamma(x-\mu)}{\sigma\sqrt{2}} + \frac{\gamma(x-\mu)}{\sigma\sqrt{2}} \bigr] \Bigr\} @@ -326,7 +326,7 @@ of many components of composite model. .. class:: ConstantModel(missing=None[, prefix=''[, name=None[, **kws]]]) - a class that consists of a single value, ``c``. This is constant in the + A class that consists of a single value, ``c``. This is constant in the sense of having no dependence on the independent variable ``x``, not in the sense of being non-varying. To be clear, ``c`` will be a variable Parameter. @@ -336,7 +336,7 @@ of many components of composite model. .. class:: LinearModel(missing=None[, prefix=''[, name=None[, **kws]]]) - a class that gives a linear model: + A class that gives a linear model: .. math:: @@ -351,7 +351,7 @@ with parameters ``slope`` for :math:`m` and ``intercept`` for :math:`b`. .. class:: QuadraticModel(missing=None[, prefix=''[, name=None[, **kws]]]) - a class that gives a quadratic model: + A class that gives a quadratic model: .. math:: @@ -365,7 +365,7 @@ with parameters ``a``, ``b``, and ``c``. .. 
class:: ParabolicModel(missing=None[, prefix=''[, name=None[, **kws]]]) - same as :class:`QuadraticModel`. + Same as :class:`QuadraticModel`. :class:`PolynomialModel` @@ -374,7 +374,7 @@ with parameters ``a``, ``b``, and ``c``. .. class:: PolynomialModel(degree, missing=None[, prefix=''[, name=None[, **kws]]]) - a class that gives a polynomial model up to ``degree`` (with maximum + A class that gives a polynomial model up to ``degree`` (with maximum value of 7). .. math:: @@ -505,11 +505,11 @@ mathematical constraints as discussed in :ref:`constraints_chapter`. A model using the user-supplied mathematical expression, which can be nearly any valid Python expresion. - :param expr: expression use to build model + :param expr: Expression use to build model. :type expr: string - :param independent_vars: list of argument names in expression that are independent variables. - :type independent_vars: ``None`` (default) or list of strings for independent variables. - :param init_script: python script to run before parsing and evaluating expression. + :param independent_vars: List of argument names in expression that are independent variables. + :type independent_vars: ``None`` (default) or list of strings for independent variables + :param init_script: Python script to run before parsing and evaluating expression. :type init_script: ``None`` (default) or string with other parameters passed to :class:`model.Model`, with the notable @@ -560,16 +560,16 @@ could to define this in a script:: >>> script = """ def mycurve(x, amp, cen, sig): - loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig) - gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig) - return log(loren)*gradient(gauss)/gradient(x) + loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig) + gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig) + return log(loren)*gradient(gauss)/gradient(x) """ and then use this with :class:`ExpressionModel` as:: >>> mod = ExpressionModel('mycurve(x, height, mid, wid)', - init_script=script, - independent_vars=['x']) + init_script=script, + independent_vars=['x']) As above, this will interpret the parameter names to be `height`, `mid`, and `wid`, and build a model that can be used to fit data. @@ -603,23 +603,23 @@ built-in default values. 
We'll simply use:: which prints out the results:: [[Model]] - Model(gaussian) + Model(gaussian) [[Fit Statistics]] - # function evals = 23 - # data points = 401 - # variables = 3 - chi-square = 29.994 - reduced chi-square = 0.075 - Akaike info crit = -1033.774 - Bayesian info crit = -1021.792 + # function evals = 23 + # data points = 401 + # variables = 3 + chi-square = 29.994 + reduced chi-square = 0.075 + Akaike info crit = -1033.774 + Bayesian info crit = -1021.792 [[Variables]] - sigma: 1.23218319 +/- 0.007374 (0.60%) (init= 1.35) - center: 9.24277049 +/- 0.007374 (0.08%) (init= 9.25) - amplitude: 30.3135571 +/- 0.157126 (0.52%) (init= 29.08159) - fwhm: 2.90156963 +/- 0.017366 (0.60%) == '2.3548200*sigma' - height: 9.81457973 +/- 0.050872 (0.52%) == '0.3989423*amplitude/max(1.e-15, sigma)' + sigma: 1.23218319 +/- 0.007374 (0.60%) (init= 1.35) + center: 9.24277049 +/- 0.007374 (0.08%) (init= 9.25) + amplitude: 30.3135571 +/- 0.157126 (0.52%) (init= 29.08159) + fwhm: 2.90156963 +/- 0.017366 (0.60%) == '2.3548200*sigma' + height: 9.81457973 +/- 0.050872 (0.52%) == '0.3989423*amplitude/max(1.e-15, sigma)' [[Correlations]] (unreported correlations are < 0.250) - C(sigma, amplitude) = 0.577 + C(sigma, amplitude) = 0.577 We see a few interesting differences from the results of the previous chapter. First, the parameter names are longer. Second, there are ``fwhm`` @@ -652,23 +652,23 @@ with the rest of the script as above. Perhaps predictably, the first thing we try gives results that are worse:: [[Model]] - Model(lorentzian) + Model(lorentzian) [[Fit Statistics]] - # function evals = 27 - # data points = 401 - # variables = 3 - chi-square = 53.754 - reduced chi-square = 0.135 - Akaike info crit = -799.830 - Bayesian info crit = -787.848 + # function evals = 27 + # data points = 401 + # variables = 3 + chi-square = 53.754 + reduced chi-square = 0.135 + Akaike info crit = -799.830 + Bayesian info crit = -787.848 [[Variables]] - sigma: 1.15484517 +/- 0.013156 (1.14%) (init= 1.35) - center: 9.24438944 +/- 0.009275 (0.10%) (init= 9.25) - amplitude: 38.9728645 +/- 0.313857 (0.81%) (init= 36.35199) - fwhm: 2.30969034 +/- 0.026312 (1.14%) == '2.0000000*sigma' - height: 10.7420881 +/- 0.086336 (0.80%) == '0.3183099*amplitude/max(1.e-15, sigma)' + sigma: 1.15484517 +/- 0.013156 (1.14%) (init= 1.35) + center: 9.24438944 +/- 0.009275 (0.10%) (init= 9.25) + amplitude: 38.9728645 +/- 0.313857 (0.81%) (init= 36.35199) + fwhm: 2.30969034 +/- 0.026312 (1.14%) == '2.0000000*sigma' + height: 10.7420881 +/- 0.086336 (0.80%) == '0.3183099*amplitude/max(1.e-15, sigma)' [[Correlations]] (unreported correlations are < 0.250) - C(sigma, amplitude) = 0.709 + C(sigma, amplitude) = 0.709 with the plot shown on the right in the figure above. The tails are now @@ -681,24 +681,24 @@ does a better job. Using :class:`VoigtModel`, this is as simple as using:: with all the rest of the script as above. 
This gives:: [[Model]] - Model(voigt) + Model(voigt) [[Fit Statistics]] - # function evals = 19 - # data points = 401 - # variables = 3 - chi-square = 14.545 - reduced chi-square = 0.037 - Akaike info crit = -1324.006 - Bayesian info crit = -1312.024 + # function evals = 19 + # data points = 401 + # variables = 3 + chi-square = 14.545 + reduced chi-square = 0.037 + Akaike info crit = -1324.006 + Bayesian info crit = -1312.024 [[Variables]] - amplitude: 35.7554017 +/- 0.138614 (0.39%) (init= 43.62238) - sigma: 0.73015574 +/- 0.003684 (0.50%) (init= 0.8775) - center: 9.24411142 +/- 0.005054 (0.05%) (init= 9.25) - gamma: 0.73015574 +/- 0.003684 (0.50%) == 'sigma' - fwhm: 2.62951718 +/- 0.013269 (0.50%) == '3.6013100*sigma' - height: 19.5360268 +/- 0.075691 (0.39%) == '0.3989423*amplitude/max(1.e-15, sigma)' + amplitude: 35.7554017 +/- 0.138614 (0.39%) (init= 43.62238) + sigma: 0.73015574 +/- 0.003684 (0.50%) (init= 0.8775) + center: 9.24411142 +/- 0.005054 (0.05%) (init= 9.25) + gamma: 0.73015574 +/- 0.003684 (0.50%) == 'sigma' + fwhm: 2.62951718 +/- 0.013269 (0.50%) == '3.6013100*sigma' + height: 19.5360268 +/- 0.075691 (0.39%) == '0.3989423*amplitude/max(1.e-15, sigma)' [[Correlations]] (unreported correlations are < 0.250) - C(sigma, amplitude) = 0.651 + C(sigma, amplitude) = 0.651 which has a much better value for :math:`\chi^2` and an obviously better @@ -731,26 +731,26 @@ give it a starting value using something like:: which gives:: [[Model]] - Model(voigt) + Model(voigt) [[Fit Statistics]] - # function evals = 23 - # data points = 401 - # variables = 4 - chi-square = 10.930 - reduced chi-square = 0.028 - Akaike info crit = -1436.576 - Bayesian info crit = -1420.600 + # function evals = 23 + # data points = 401 + # variables = 4 + chi-square = 10.930 + reduced chi-square = 0.028 + Akaike info crit = -1436.576 + Bayesian info crit = -1420.600 [[Variables]] - amplitude: 34.1914716 +/- 0.179468 (0.52%) (init= 43.62238) - sigma: 0.89518950 +/- 0.014154 (1.58%) (init= 0.8775) - center: 9.24374845 +/- 0.004419 (0.05%) (init= 9.25) - gamma: 0.52540156 +/- 0.018579 (3.54%) (init= 0.7) - fwhm: 3.22385492 +/- 0.050974 (1.58%) == '3.6013100*sigma' - height: 15.2374711 +/- 0.299235 (1.96%) == '0.3989423*amplitude/max(1.e-15, sigma)' + amplitude: 34.1914716 +/- 0.179468 (0.52%) (init= 43.62238) + sigma: 0.89518950 +/- 0.014154 (1.58%) (init= 0.8775) + center: 9.24374845 +/- 0.004419 (0.05%) (init= 9.25) + gamma: 0.52540156 +/- 0.018579 (3.54%) (init= 0.7) + fwhm: 3.22385492 +/- 0.050974 (1.58%) == '3.6013100*sigma' + height: 15.2374711 +/- 0.299235 (1.96%) == '0.3989423*amplitude/max(1.e-15, sigma)' [[Correlations]] (unreported correlations are < 0.250) - C(sigma, gamma) = -0.928 - C(gamma, amplitude) = 0.821 - C(sigma, amplitude) = -0.651 + C(sigma, gamma) = -0.928 + C(gamma, amplitude) = 0.821 + C(sigma, amplitude) = -0.651 and the fit shown on the right above. 
@@ -795,31 +795,31 @@ After making a composite model, we run :meth:`fit` and report the results, which gives:: [[Model]] - (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_')) + (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_')) [[Fit Statistics]] - # function evals = 51 - # data points = 201 - # variables = 5 - chi-square = 584.829 - reduced chi-square = 2.984 - Akaike info crit = 224.671 - Bayesian info crit = 241.187 + # function evals = 51 + # data points = 201 + # variables = 5 + chi-square = 584.829 + reduced chi-square = 2.984 + Akaike info crit = 224.671 + Bayesian info crit = 241.187 [[Variables]] - line_slope: 2.03039786 +/- 0.092221 (4.54%) (init= 0) - line_intercept: 11.7234542 +/- 0.274094 (2.34%) (init= 10.7816) - step_amplitude: 112.071629 +/- 0.647316 (0.58%) (init= 134.0885) - step_sigma: 0.67132341 +/- 0.010873 (1.62%) (init= 1.428571) - step_center: 3.12697699 +/- 0.005151 (0.16%) (init= 2.5) + line_slope: 2.03039786 +/- 0.092221 (4.54%) (init= 0) + line_intercept: 11.7234542 +/- 0.274094 (2.34%) (init= 10.7816) + step_amplitude: 112.071629 +/- 0.647316 (0.58%) (init= 134.0885) + step_sigma: 0.67132341 +/- 0.010873 (1.62%) (init= 1.428571) + step_center: 3.12697699 +/- 0.005151 (0.16%) (init= 2.5) [[Correlations]] (unreported correlations are < 0.100) - C(line_slope, step_amplitude) = -0.878 - C(step_amplitude, step_sigma) = 0.563 - C(line_slope, step_sigma) = -0.455 - C(line_intercept, step_center) = 0.427 - C(line_slope, line_intercept) = -0.308 - C(line_slope, step_center) = -0.234 - C(line_intercept, step_sigma) = -0.139 - C(line_intercept, step_amplitude) = -0.121 - C(step_amplitude, step_center) = 0.109 + C(line_slope, step_amplitude) = -0.878 + C(step_amplitude, step_sigma) = 0.563 + C(line_slope, step_sigma) = -0.455 + C(line_intercept, step_center) = 0.427 + C(line_slope, line_intercept) = -0.308 + C(line_slope, step_center) = -0.234 + C(line_intercept, step_sigma) = -0.139 + C(line_intercept, step_amplitude) = -0.121 + C(step_amplitude, step_center) = 0.109 with a plot of @@ -857,39 +857,39 @@ parameter values. 
The fit results printed out are:: [[Model]] - ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_')) + ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_')) [[Fit Statistics]] - # function evals = 66 - # data points = 250 - # variables = 8 - chi-square = 1247.528 - reduced chi-square = 5.155 - Akaike info crit = 417.865 - Bayesian info crit = 446.036 + # function evals = 66 + # data points = 250 + # variables = 8 + chi-square = 1247.528 + reduced chi-square = 5.155 + Akaike info crit = 417.865 + Bayesian info crit = 446.036 [[Variables]] - exp_amplitude: 99.0183282 +/- 0.537487 (0.54%) (init= 162.2102) - exp_decay: 90.9508859 +/- 1.103105 (1.21%) (init= 93.24905) - g1_sigma: 16.6725753 +/- 0.160481 (0.96%) (init= 15) - g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 105) - g1_amplitude: 4257.77319 +/- 42.38336 (1.00%) (init= 2000) - g1_fwhm: 39.2609139 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma' - g1_height: 101.880231 +/- 0.592170 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)' - g2_sigma: 13.8069484 +/- 0.186794 (1.35%) (init= 15) - g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 155) - g2_amplitude: 2493.41770 +/- 36.16947 (1.45%) (init= 2000) - g2_fwhm: 32.5128782 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma' - g2_height: 72.0455934 +/- 0.617220 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)' + exp_amplitude: 99.0183282 +/- 0.537487 (0.54%) (init= 162.2102) + exp_decay: 90.9508859 +/- 1.103105 (1.21%) (init= 93.24905) + g1_sigma: 16.6725753 +/- 0.160481 (0.96%) (init= 15) + g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 105) + g1_amplitude: 4257.77319 +/- 42.38336 (1.00%) (init= 2000) + g1_fwhm: 39.2609139 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma' + g1_height: 101.880231 +/- 0.592170 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)' + g2_sigma: 13.8069484 +/- 0.186794 (1.35%) (init= 15) + g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 155) + g2_amplitude: 2493.41770 +/- 36.16947 (1.45%) (init= 2000) + g2_fwhm: 32.5128782 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma' + g2_height: 72.0455934 +/- 0.617220 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)' [[Correlations]] (unreported correlations are < 0.500) - C(g1_sigma, g1_amplitude) = 0.824 - C(g2_sigma, g2_amplitude) = 0.815 - C(exp_amplitude, exp_decay) = -0.695 - C(g1_sigma, g2_center) = 0.684 - C(g1_center, g2_amplitude) = -0.669 - C(g1_center, g2_sigma) = -0.652 - C(g1_amplitude, g2_center) = 0.648 - C(g1_center, g2_center) = 0.621 - C(g1_sigma, g1_center) = 0.507 - C(exp_decay, g1_amplitude) = -0.507 + C(g1_sigma, g1_amplitude) = 0.824 + C(g2_sigma, g2_amplitude) = 0.815 + C(exp_amplitude, exp_decay) = -0.695 + C(g1_sigma, g2_center) = 0.684 + C(g1_center, g2_amplitude) = -0.669 + C(g1_center, g2_sigma) = -0.652 + C(g1_amplitude, g2_center) = 0.648 + C(g1_center, g2_center) = 0.621 + C(g1_sigma, g1_center) = 0.507 + C(exp_decay, g1_amplitude) = -0.507 We get a very good fit to this problem (described at the NIST site as of average difficulty, but the tests there are generally deliberately challenging) by @@ -915,9 +915,9 @@ this, and by defining an :func:`index_of` function to limit the data range. 
That is, with::

    def index_of(arrval, value):
-        "return index of array *at or below* value "
-        if value < min(arrval): return 0
-        return max(np.where(arrval<=value)[0])
+       "return index of array *at or below* value "
+       if value < min(arrval): return 0
+       return max(np.where(arrval<=value)[0])

    ix1 = index_of(x, 75)
    ix2 = index_of(x, 135)

@@ -932,39 +932,39 @@ giving identical values (to the precision printed out in the report), but in fewer steps, and without any bounds on parameters at all::

    [[Model]]
-    ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+     ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
    [[Fit Statistics]]
-    # function evals = 48
-    # data points = 250
-    # variables = 8
-    chi-square = 1247.528
-    reduced chi-square = 5.155
-    Akaike info crit = 417.865
-    Bayesian info crit = 446.036
+     # function evals = 48
+     # data points = 250
+     # variables = 8
+     chi-square = 1247.528
+     reduced chi-square = 5.155
+     Akaike info crit = 417.865
+     Bayesian info crit = 446.036
    [[Variables]]
-    exp_amplitude: 99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
-    exp_decay: 90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
-    g1_sigma: 16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
-    g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 106.5)
-    g1_amplitude: 4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
-    g1_fwhm: 39.2609141 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
-    g1_height: 101.880231 +/- 0.592171 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
-    g2_sigma: 13.8069481 +/- 0.186794 (1.35%) (init= 15)
-    g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 150)
-    g2_amplitude: 2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
-    g2_fwhm: 32.5128777 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
-    g2_height: 72.0455935 +/- 0.617221 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
+     exp_amplitude: 99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+     exp_decay: 90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
+     g1_sigma: 16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+     g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+     g1_amplitude: 4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
+     g1_fwhm: 39.2609141 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
+     g1_height: 101.880231 +/- 0.592171 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
+     g2_sigma: 13.8069481 +/- 0.186794 (1.35%) (init= 15)
+     g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 150)
+     g2_amplitude: 2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
+     g2_fwhm: 32.5128777 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
+     g2_height: 72.0455935 +/- 0.617221 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
    [[Correlations]] (unreported correlations are < 0.500)
-    C(g1_sigma, g1_amplitude) = 0.824
-    C(g2_sigma, g2_amplitude) = 0.815
-    C(exp_amplitude, exp_decay) = -0.695
-    C(g1_sigma, g2_center) = 0.684
-    C(g1_center, g2_amplitude) = -0.669
-    C(g1_center, g2_sigma) = -0.652
-    C(g1_amplitude, g2_center) = 0.648
-    C(g1_center, g2_center) = 0.621
-    C(g1_sigma, g1_center) = 0.507
-    C(exp_decay, g1_amplitude) = -0.507
+     C(g1_sigma, g1_amplitude) = 0.824
+     C(g2_sigma, g2_amplitude) = 0.815
+     C(exp_amplitude, exp_decay) = -0.695
+     C(g1_sigma, g2_center) = 0.684
+     C(g1_center, g2_amplitude) = -0.669
+     C(g1_center, g2_sigma) = -0.652
+     C(g1_amplitude, g2_center) = 0.648
+     C(g1_center, g2_center) = 0.621
+     C(g1_sigma, g1_center) = 0.507
+     C(exp_decay, g1_amplitude) = -0.507

 This script is in the file ``doc_nistgauss2.py`` in the
examples folder, and the fit result shown on the right above shows an improved initial

diff --git a/doc/confidence.rst b/doc/confidence.rst
index 54d2970ac..aadf54ce4 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -63,17 +63,17 @@ starting point::

    >>> result = mini.minimize()
    >>> print(lmfit.fit_report(result.params))
    [[Variables]]
-    a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
-    b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
+     a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+     b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
    [[Correlations]] (unreported correlations are < 0.100)
-    C(a, b) = 0.601
+     C(a, b) = 0.601

 Now it is just a simple function call to calculate the confidence intervals::

    >>> ci = lmfit.conf_interval(mini, result)
    >>> lmfit.printfuncs.report_ci(ci)
-      99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
+       99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
    a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
    b   1.94751   1.96049   1.97274   1.97741   1.99680   2.00905   2.02203

@@ -103,16 +103,16 @@ uncertainties and correlations

 which will report::

    [[Variables]]
-    a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
-    a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
-    t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
-    t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+     a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+     a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+     t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+     t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
    [[Correlations]] (unreported correlations are < 0.500)
-    C(a2, t2) = 0.987
-    C(a2, t1) = -0.925
-    C(t1, t2) = -0.881
-    C(a1, t1) = -0.599
-      95.00%    68.00%     0.00%    68.00%    95.00%
+     C(a2, t2) = 0.987
+     C(a2, t1) = -0.925
+     C(t1, t2) = -0.881
+     C(a1, t1) = -0.599
+       95.00%    68.00%     0.00%    68.00%    95.00%
    a1   2.71850   2.84525   2.98622   3.14874   3.34076
    a2  -4.63180  -4.46663  -4.33526  -4.22883  -4.14178
    t2  10.82699  11.33865  11.82404  12.28195  12.71094

diff --git a/doc/constraints.rst b/doc/constraints.rst
index 7d06b8d1b..33b14519e 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -7,27 +7,27 @@ Using Mathematical Constraints
 .. _asteval: http://newville.github.io/asteval/

 Being able to fix variables to a constant value or place upper and lower
-bounds on their values can greatly simplify modeling real data. These
-capabilities are key to lmfit's Parameters. In addition, it is sometimes
-highly desirable to place mathematical constraints on parameter values.
-For example, one might want to require that two Gaussian peaks have the
-same width, or have amplitudes that are constrained to add to some value.
-Of course, one could rewrite the objective or model function to place such
-requirements, but this is somewhat error prone, and limits the flexibility
+bounds on their values can greatly simplify modeling real data. These
+capabilities are key to lmfit's Parameters. In addition, it is sometimes
+highly desirable to place mathematical constraints on parameter values.
+For example, one might want to require that two Gaussian peaks have the
+same width, or have amplitudes that are constrained to add to some value.
+Of course, one could rewrite the objective or model function to place such
+requirements, but this is somewhat error prone, and limits the flexibility
 so that exploring constraints becomes laborious.
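For a concrete sense of what such a constraint looks like in practice, here is a minimal sketch (the two-peak model, the ``g1_``/``g2_`` prefixes, and the target sum of 10 are purely illustrative, not part of this patch)::

    from lmfit.models import GaussianModel

    mod = GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
    pars = mod.make_params()
    # require the second peak to have the same width as the first
    pars['g2_sigma'].set(expr='g1_sigma')
    # require the two amplitudes to add up to 10
    pars['g2_amplitude'].set(expr='10 - g1_amplitude')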
-To simplify the setting of constraints, Parameters can be assigned a -mathematical expression of other Parameters, builtin constants, and builtin -mathematical functions that will be used to determine its value. The -expressions used for constraints are evaluated using the `asteval`_ module, -which uses Python syntax, and evaluates the constraint expressions in a safe +To simplify the setting of constraints, Parameters can be assigned a +mathematical expression of other Parameters, builtin constants, and builtin +mathematical functions that will be used to determine its value. The +expressions used for constraints are evaluated using the `asteval`_ module, +which uses Python syntax, and evaluates the constraint expressions in a safe and isolated namespace. -This approach to mathematical constraints allows one to not have to write a -separate model function for two Gaussians where the two ``sigma`` values are +This approach to mathematical constraints allows one to not have to write a +separate model function for two Gaussians where the two ``sigma`` values are forced to be equal, or where amplitudes are related. Instead, one can write a -more general two Gaussian model (perhaps using :class:`GaussianModel`) and -impose such constraints on the Parameters for a particular fit. +more general two Gaussian model (perhaps using :class:`GaussianModel`) and +impose such constraints on the Parameters for a particular fit. Overview diff --git a/doc/faq.rst b/doc/faq.rst index f21a7a248..f29ac5b61 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -59,7 +59,7 @@ different arrays. As a bonus, the two lines share the 'offset' parameter:: model1 = params['offset'] + x * params['slope1'] model2 = params['offset'] + x * params['slope2'] - resid1 = dat1 - model1 + resid1 = dat1 - model1 resid2 = dat2 - model2 return numpy.concatenate((resid1, resid2)) diff --git a/doc/fitting.rst b/doc/fitting.rst index effa6f00f..442a4be3d 100644 --- a/doc/fitting.rst +++ b/doc/fitting.rst @@ -32,18 +32,18 @@ Writing a Fitting Function An important component of a fit is writing a function to be minimized -- the *objective function*. Since this function will be called by other routines, there are fairly stringent requirements for its call signature -and return value. In principle, your function can be any python callable, +and return value. In principle, your function can be any Python callable, but it must look like this: .. function:: func(params, *args, **kws): - calculate objective residual to be minimized from parameters. + Calculate objective residual to be minimized from parameters. - :param params: parameters. - :type params: :class:`Parameters`. - :param args: positional arguments. Must match ``args`` argument to :func:`minimize` - :param kws: keyword arguments. Must match ``kws`` argument to :func:`minimize` - :return: residual array (generally data-model) to be minimized in the least-squares sense. + :param params: Parameters. + :type params: :class:`Parameters` + :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`. + :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`. + :return: Residual array (generally data-model) to be minimized in the least-squares sense. :rtype: numpy array. The length of this array cannot change between calls. @@ -62,30 +62,30 @@ method, effectively doing a least-squares optimization of the return values. 
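As a minimal, self-contained sketch of a function obeying this signature (the exponential-decay model and the parameter names ``amp`` and ``tau`` here are illustrative)::

    import numpy as np
    import lmfit

    def residual(params, x, data, eps):
        # evaluate a simple model from the current parameter values
        model = params['amp'].value * np.exp(-x / params['tau'].value)
        # return the weighted residual array, minimized in the least-squares sense
        return (data - model) / eps

    x = np.linspace(0, 10, 101)
    data = 10.0 * np.exp(-x / 2.0) + np.random.normal(scale=0.1, size=x.size)
    eps = 0.1 * np.ones_like(x)

    params = lmfit.Parameters()
    params.add('amp', value=5.0)
    params.add('tau', value=1.0)
    out = lmfit.minimize(residual, params, args=(x, data, eps))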
Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable to unpack these to get numerical values at the top of the function. A
-simple way to do this is with :meth:`Parameters.valuesdict`, as with::
+simple way to do this is with :meth:`Parameters.valuesdict`, as shown below::

    def residual(pars, x, data=None, eps=None):
-        # unpack parameters:
-        #  extract .value attribute for each parameter
-        parvals = pars.valuesdict()
-        period = parvals['period']
-        shift = parvals['shift']
-        decay = parvals['decay']
+       # unpack parameters:
+       #  extract .value attribute for each parameter
+       parvals = pars.valuesdict()
+       period = parvals['period']
+       shift = parvals['shift']
+       decay = parvals['decay']

-        if abs(shift) > pi/2:
-            shift = shift - sign(shift)*pi
+       if abs(shift) > pi/2:
+           shift = shift - sign(shift)*pi

-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
+       if abs(period) < 1.e-10:
+           period = sign(period)*1.e-10

-        model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
+       model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)

-        if data is None:
-            return model
-        if eps is None:
-            return (model - data)
-        return (model - data)/eps
+       if data is None:
+           return model
+       if eps is None:
+           return (model - data)
+       return (model - data)/eps

 In this example, ``x`` is a positional (required) argument, while the ``data`` array is actually optional (so that the function returns the model

@@ -98,8 +98,8 @@ to use the bounds on the :class:`Parameter` to do this::

 but putting this directly in the function with::

-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
+       if abs(period) < 1.e-10:
+           period = sign(period)*1.e-10

 is also a reasonable approach. Similarly, one could place bounds on the ``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.

@@ -153,6 +153,8 @@ class as listed in the :ref:`Table of Supported Fitting Methods

   | Differential          | ``differential_evolution``                                       |
   | Evolution             |                                                                  |
   +-----------------------+------------------------------------------------------------------+
+  | Brute force method    | ``brute``                                                        |
+  +-----------------------+------------------------------------------------------------------+

 .. note::

@@ -194,7 +196,7 @@ well formatted text tables you can execute::

 with `results` being a `MinimizerResult` object. Note that the method :meth:`lmfit.parameter.Parameters.pretty_print` accepts several arguments
-for customizing the output (e.g. column width, numeric format, etc.).
+for customizing the output (e.g., column width, numeric format, etc.).

 .. autoclass:: MinimizerResult

@@ -246,7 +248,7 @@ After a fit using the :meth:`leastsq` method has completed successfully, standard errors for the fitted variables and correlations between pairs of fitted variables are automatically calculated from the covariance matrix. The standard error (estimated :math:`1\sigma`
-error-bar) go into the :attr:`stderr` attribute of the Parameter. The
+error-bar) goes into the :attr:`stderr` attribute of the Parameter. The
 correlations with all other variables will be put into the :attr:`correl` attribute of the Parameter -- a dictionary with keys for all other Parameters and values of the corresponding correlation.
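After a fit, these attributes might be read as follows (a small sketch, assuming a ``result`` returned by :func:`minimize` with a variable named ``amp``; the printed values in the comments are hypothetical)::

    amp = result.params['amp']
    print(amp.value, amp.stderr)   # best-fit value and estimated 1-sigma standard error
    print(amp.correl)              # e.g. {'period': -0.243, 'shift': -0.297}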
@@ -272,8 +274,8 @@ The :class:`MinimizerResult` includes the traditional chi-square and reduced chi
    :nowrap:

    \begin{eqnarray*}
-     \chi^2  &=&  \sum_i^N r_i^2 \\
-     \chi^2_\nu &=& = \chi^2 / (N-N_{\rm varys})
+     \chi^2 &=& \sum_i^N r_i^2 \\
+     \chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
    \end{eqnarray*}

 where :math:`r` is the residual array returned by the objective function

@@ -288,7 +290,7 @@ Also included are the `Akaike Information Criterion
 held in the ``aic`` and ``bic`` attributes, respectively. These give slightly different measures of the relative quality for a fit, trying to balance quality of fit with the number of variable parameters used in the fit.
-These are calculated as
+These are calculated as:

 .. math::
    :nowrap:

@@ -318,17 +320,17 @@ used to abort a fit.

 .. function:: iter_cb(params, iter, resid, *args, **kws):

-   user-supplied function to be run at each iteration
+   User-supplied function to be run at each iteration.

-   :param params: parameters.
+   :param params: Parameters.
    :type params: :class:`Parameters`.
-   :param iter: iteration number
+   :param iter: Iteration number.
    :type iter: integer
-   :param resid: residual array.
+   :param resid: Residual array.
    :type resid: ndarray
-   :param args: positional arguments. Must match ``args`` argument to :func:`minimize`
-   :param kws: keyword arguments. Must match ``kws`` argument to :func:`minimize`
-   :return: residual array (generally data-model) to be minimized in the least-squares sense.
+   :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`.
+   :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`.
+   :return: Residual array (generally data-model) to be minimized in the least-squares sense.
    :rtype: ``None`` for normal behavior, any value like ``True`` to abort fit.

@@ -361,8 +363,11 @@ The Minimizer object has a few public methods:

 .. automethod:: Minimizer.prepare_fit

-.. automethod:: Minimizer.emcee
+.. automethod:: Minimizer.brute

+For more information, see the example in ``examples/lmfit_brute.py``.
+
+.. automethod:: Minimizer.emcee

..
_label-emcee: @@ -398,10 +403,10 @@ Solving with :func:`minimize` gives the Maximum Likelihood solution.:: >>> mi = lmfit.minimize(residual, p, method='Nelder') >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5) [[Variables]] - a1: 2.98623688 (init= 4) - a2: -4.33525596 (init= 4) - t1: 1.30993185 (init= 3) - t2: 11.8240752 (init= 3) + a1: 2.98623688 (init= 4) + a2: -4.33525596 (init= 4) + t1: 1.30993185 (init= 3) + t2: 11.8240752 (init= 3) [[Correlations]] (unreported correlations are < 0.500) >>> plt.plot(x, y) >>> plt.plot(x, residual(mi.params) + y, 'r') @@ -455,18 +460,18 @@ You can see that we recovered the right uncertainty level on the data.:: median of posterior probability distribution ------------------------------------------ [[Variables]] - a1: 3.00975345 +/- 0.151034 (5.02%) (init= 2.986237) - a2: -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256) - t1: 1.32726415 +/- 0.142995 (10.77%) (init= 1.309932) - t2: 11.7911935 +/- 0.495583 (4.20%) (init= 11.82408) - f: 0.09805494 +/- 0.004256 (4.34%) (init= 1) + a1: 3.00975345 +/- 0.151034 (5.02%) (init= 2.986237) + a2: -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256) + t1: 1.32726415 +/- 0.142995 (10.77%) (init= 1.309932) + t2: 11.7911935 +/- 0.495583 (4.20%) (init= 11.82408) + f: 0.09805494 +/- 0.004256 (4.34%) (init= 1) [[Correlations]] (unreported correlations are < 0.100) - C(a2, t2) = 0.981 - C(a2, t1) = -0.927 - C(t1, t2) = -0.880 - C(a1, t1) = -0.519 - C(a1, a2) = 0.195 - C(a1, t2) = 0.146 + C(a2, t2) = 0.981 + C(a2, t1) = -0.927 + C(t1, t2) = -0.880 + C(a1, t1) = -0.519 + C(a1, a2) = 0.195 + C(a1, t2) = 0.146 >>> # find the maximum likelihood solution >>> highest_prob = np.argmax(res.lnprob) @@ -497,20 +502,20 @@ Getting and Printing Fit Reports .. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1) - generate and return text of report of best-fit values, uncertainties, + Generate and return text of report of best-fit values, uncertainties, and correlations from fit. :param result: :class:`MinimizerResult` object as returned by :func:`minimize`. :param modelpars: Parameters with "Known Values" (optional, default None) - :param show_correl: whether to show list of sorted correlations [``True``] - :param min_correl: smallest correlation absolute value to show [0.1] + :param show_correl: Whether to show list of sorted correlations [``True``] + :param min_correl: Smallest correlation absolute value to show [0.1] If the first argument is a :class:`Parameters` object, goodness-of-fit statistics will not be included. .. function:: report_fit(result, modelpars=None, show_correl=True, min_correl=0.1) - print text of report from :func:`fit_report`. + Print text of report from :func:`fit_report`. 
An example fit with report would be @@ -519,22 +524,22 @@ An example fit with report would be which would write out:: [[Fit Statistics]] - # function evals = 85 - # data points = 1001 - # variables = 4 - chi-square = 498.812 - reduced chi-square = 0.500 - Akaike info crit = -689.223 - Bayesian info crit = -669.587 + # function evals = 85 + # data points = 1001 + # variables = 4 + chi-square = 498.812 + reduced chi-square = 0.500 + Akaike info crit = -689.223 + Bayesian info crit = -669.587 [[Variables]] - amp: 13.9121944 +/- 0.141202 (1.01%) (init= 13) - period: 5.48507044 +/- 0.026664 (0.49%) (init= 2) - shift: 0.16203676 +/- 0.014056 (8.67%) (init= 0) - decay: 0.03264538 +/- 0.000380 (1.16%) (init= 0.02) + amp: 13.9121944 +/- 0.141202 (1.01%) (init= 13) + period: 5.48507044 +/- 0.026664 (0.49%) (init= 2) + shift: 0.16203676 +/- 0.014056 (8.67%) (init= 0) + decay: 0.03264538 +/- 0.000380 (1.16%) (init= 0.02) [[Correlations]] (unreported correlations are < 0.100) - C(period, shift) = 0.797 - C(amp, decay) = 0.582 - C(amp, shift) = -0.297 - C(amp, period) = -0.243 - C(shift, decay) = -0.182 - C(period, decay) = -0.150 + C(period, shift) = 0.797 + C(amp, decay) = 0.582 + C(amp, shift) = -0.297 + C(amp, period) = -0.243 + C(shift, decay) = -0.182 + C(period, decay) = -0.150 diff --git a/doc/installation.rst b/doc/installation.rst index 61e488e92..9cab8934b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -13,7 +13,7 @@ Prerequisites The lmfit package requires Python, Numpy, and Scipy. Lmfit works with Python 2.7, 3.3, 3.4, and 3.5. Support for Python 2.6 -ended with Lmfit version 0.9.4. Scipy version 0.14 or higher is required, +ended with lmfit version 0.9.4. Scipy version 0.14 or higher is required, with 0.17 or higher recommended to be able to use the latest optimization features from scipy. Numpy version 1.5 or higher is required. diff --git a/doc/intro.rst b/doc/intro.rst index 71346289b..a977d7984 100644 --- a/doc/intro.rst +++ b/doc/intro.rst @@ -41,10 +41,10 @@ simple example, one might write an objective function like this:: def residual(vars, x, data, eps_data): amp = vars[0] phaseshift = vars[1] - freq = vars[2] + freq = vars[2] decay = vars[3] - model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay) + model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay) return (data-model)/eps_data @@ -79,13 +79,13 @@ including: Again, this is acceptable for small or one-off cases, but becomes painful if the fitting model needs to change. -These shortcomings are really do solely to the use of traditional arrays of +These shortcomings are really solely due to the use of traditional arrays of variables, as matches closely the implementation of the Fortran code. The lmfit module overcomes these shortcomings by using objects -- a core reason for working with Python. The key concept for lmfit is to use :class:`Parameter` objects instead of plain floating point numbers as the variables for the fit. By using :class:`Parameter` objects (or the closely related -:class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can +:class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can: a) forget about the order of variables and refer to Parameters by meaningful names. 
@@ -101,10 +101,10 @@ as::

    def residual(params, x, data, eps_data):
        amp = params['amp']
        pshift = params['phase']
-       freq = params['frequency']
+       freq = params['frequency']
        decay = params['decay']

-       model = amp * sin(x * freq  + pshift) * exp(-x*x*decay)
+       model = amp * sin(x * freq + pshift) * exp(-x*x*decay)

        return (data-model)/eps_data

diff --git a/doc/model.rst b/doc/model.rst
index ea2894610..34d4ea471 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -10,21 +10,21 @@ A common use of least-squares minimization is *curve fitting*, where one
 has a parametrized model function meant to explain some phenomena and wants to adjust the numerical values for the model to most closely match some data. With :mod:`scipy`, such problems are commonly solved with
-:scipydoc:`scipy.optimize.curve_fit`, which is a wrapper around
-:scipydoc:`scipy.optimize.leastsq`. Since Lmfit's :func:`~lmfit.minimizer.minimize` is also
-a high-level wrapper around :scipydoc:`scipy.optimize.leastsq` it can be used
+:scipydoc:`optimize.curve_fit`, which is a wrapper around
+:scipydoc:`optimize.leastsq`. Since lmfit's :func:`~lmfit.minimizer.minimize` is also
+a high-level wrapper around :scipydoc:`optimize.leastsq` it can be used
 for curve-fitting problems, but requires more effort than using
-:scipydoc:`scipy.optimize.curve_fit`.
+:scipydoc:`optimize.curve_fit`.

 Here we discuss lmfit's :class:`Model` class. This takes a model function -- a function that calculates a model for some data -- and provides methods to create parameters for that model and to fit data using that model
-function. This is closer in spirit to :scipydoc:`scipy.optimize.curve_fit`,
+function. This is closer in spirit to :scipydoc:`optimize.curve_fit`,
 but with the advantages of using :class:`~lmfit.parameter.Parameters` and lmfit.

 In addition to allowing you to turn any model function into a curve-fitting
-method, Lmfit also provides canonical definitions for many known line shapes
+method, lmfit also provides canonical definitions for many known line shapes
 such as Gaussian or Lorentzian peaks and Exponential decays that are widely used in many scientific domains. These are available in the :mod:`models` module that will be discussed in more detail in the next chapter

@@ -49,7 +49,7 @@ own. We start with a simple definition of the model function:
    ...

 We want to fit this objective function to data :math:`y(x)` represented by the
-arrays ``y`` and ``x``. This can be done easily with :scipydoc:`scipy.optimize.curve_fit`::
+arrays ``y`` and ``x``. This can be done easily with :scipydoc:`optimize.curve_fit`::

    >>> from scipy.optimize import curve_fit
    >>>

@@ -62,7 +62,7 @@ arrays ``y`` and ``x``. This can be done easily with :scipydoc:`scipy.optimize.

 We sample random data points, make an initial guess of the model
-values, and run :scipydoc:`scipy.optimize.curve_fit` with the model function,
+values, and run :scipydoc:`optimize.curve_fit` with the model function,
 data arrays, and initial guesses. The results returned are the optimal values for the parameters and the covariance matrix. It's simple and very useful. But it misses the benefits of lmfit.

@@ -73,7 +73,7 @@ such a function would be fairly simple (essentially, ``data - model``,
 possibly with some weighting), and we would need to define and use appropriately named parameters.
Though convenient, it is somewhat of a burden to keep the named parameter straight (on the other hand, with -:scipydoc:`scipy.optimize.curve_fit` you are required to remember the parameter +:scipydoc:`optimize.curve_fit` you are required to remember the parameter order). After doing this a few times it appears as a recurring pattern, and we can imagine automating this process. That's where the :class:`Model` class comes in. @@ -146,21 +146,21 @@ a :class:`ModelResult` object. As we will see below, this has many components, including a :meth:`fit_report` method, which will show:: [[Model]] - gaussian + gaussian [[Fit Statistics]] - # function evals = 33 - # data points = 101 - # variables = 3 - chi-square = 3.409 - reduced chi-square = 0.035 - Akaike info crit = -336.264 - Bayesian info crit = -328.418 + # function evals = 33 + # data points = 101 + # variables = 3 + chi-square = 3.409 + reduced chi-square = 0.035 + Akaike info crit = -336.264 + Bayesian info crit = -328.418 [[Variables]] - amp: 8.88021829 +/- 0.113594 (1.28%) (init= 5) - cen: 5.65866102 +/- 0.010304 (0.18%) (init= 5) - wid: 0.69765468 +/- 0.010304 (1.48%) (init= 1) + amp: 8.88021829 +/- 0.113594 (1.28%) (init= 5) + cen: 5.65866102 +/- 0.010304 (0.18%) (init= 5) + wid: 0.69765468 +/- 0.010304 (1.48%) (init= 1) [[Correlations]] (unreported correlations are < 0.100) - C(amp, wid) = 0.577 + C(amp, wid) = 0.577 The result will also have :attr:`init_fit` for the fit with the initial parameter values and a :attr:`best_fit` for the fit with the best fit @@ -182,7 +182,7 @@ Note that the model fitting was really performed with 2 lines of code:: These lines clearly express that we want to turn the ``gaussian`` function into a fitting model, and then fit the :math:`y(x)` data to this model, starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``. -This is much more expressive than :scipydoc:`scipy.optimize.curve_fit`:: +This is much more expressive than :scipydoc:`optimize.curve_fit`:: best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1]) @@ -203,19 +203,19 @@ function as a fitting model. introspection to automatically converting argument names of the function to Parameter names. - :param func: model function to be wrapped + :param func: Model function to be wrapped. :type func: callable - :param independent_vars: list of argument names to ``func`` that are independent variables. + :param independent_vars: List of argument names to ``func`` that are independent variables. :type independent_vars: ``None`` (default) or list of strings. - :param param_names: list of argument names to ``func`` that should be made into Parameters. + :param param_names: List of argument names to ``func`` that should be made into Parameters. :type param_names: ``None`` (default) or list of strings - :param missing: how to handle missing values. + :param missing: How to handle missing values. :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'. - :param prefix: prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`. + :param prefix: Prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`. :type prefix: string - :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``). + :param name: Name for the model. When ``None`` (default) the name is the same as the model function (``func``). :type name: ``None`` or string. - :param kws: additional keyword arguments to pass to model function. 
+ :param kws: Additional keyword arguments to pass to model function. Of course, the model function will have to return an array that will be the @@ -228,11 +228,11 @@ specifying one or more independent variables. .. method:: Model.eval(params=None[, **kws]) - evaluate the model function for a set of parameters and inputs. + Evaluate the model function for a set of parameters and inputs. - :param params: parameters to use for fit. + :param params: Parameters to use for fit. :type params: ``None`` (default) or Parameters - :param kws: additional keyword arguments to pass to model function. + :param kws: Additional keyword arguments to pass to model function. :return: ndarray for model given the parameters and other arguments. If ``params`` is ``None``, the values for all parameters are expected to @@ -247,24 +247,24 @@ specifying one or more independent variables. .. method:: Model.fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]]) - perform a fit of the model to the ``data`` array with a set of + Perform a fit of the model to the ``data`` array with a set of parameters. - :param data: array of data to be fitted. + :param data: Array of data to be fitted. :type data: ndarray-like - :param params: parameters to use for fit. + :param params: Parameters to use for fit. :type params: ``None`` (default) or Parameters - :param weights: weights to use for residual calculation in fit. + :param weights: Weights to use for residual calculation in fit. :type weights: ``None`` (default) or ndarray-like. - :param method: name of fitting method to use. See :ref:`fit-methods-label` for details + :param method: Name of fitting method to use. See :ref:`fit-methods-label` for details. :type method: string (default ``leastsq``) - :param scale_covar: whether to automatically scale covariance matrix (``leastsq`` only) + :param scale_covar: Whether to automatically scale covariance matrix (``leastsq`` only). :type scale_covar: bool (default ``True``) - :param iter_cb: function to be called at each fit iteration. See :ref:`fit-itercb-label` for details. + :param iter_cb: Function to be called at each fit iteration. See :ref:`fit-itercb-label` for details. :type iter_cb: callable or ``None`` - :param verbose: print a message when a new parameter is created due to a *hint* + :param verbose: Print a message when a new parameter is created due to a *hint*. :type verbose: bool (default ``True``) - :param kws: additional keyword arguments to pass to model function. + :param kws: Additional keyword arguments to pass to model function. :return: :class:`ModelResult` object. If ``params`` is ``None``, the internal ``params`` will be used. If it @@ -283,9 +283,9 @@ specifying one or more independent variables. Guess starting values for model parameters. - :param data: data array used to guess parameter values + :param data: Data array used to guess parameter values. :type func: ndarray - :param kws: additional options to pass to model function. + :param kws: Additional options to pass to model function. :return: :class:`lmfit.parameter.Parameters` with guessed initial values for each parameter. by default this is left to raise a ``NotImplementedError``, but may be @@ -298,7 +298,7 @@ specifying one or more independent variables. Create a set of parameters for model. - :param kws: optional keyword/value pairs to set initial values for parameters. + :param kws: Optional keyword/value pairs to set initial values for parameters. :return: :class:`lmfit.parameter.Parameters`. 
The parameters may or may not have decent initial values for each @@ -307,21 +307,21 @@ specifying one or more independent variables. .. method:: Model.set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]]) - set *hints* to use when creating parameters with :meth:`Model.make_param` for + Set *hints* to use when creating parameters with :meth:`Model.make_param` for the named parameter. This is especially convenient for setting initial values. The ``name`` can include the models ``prefix`` or not. - :param name: parameter name. + :param name: Parameter name. :type name: string - :param value: value for parameter + :param value: Value for parameter. :type value: float - :param min: lower bound for parameter value - :type min: ``None`` or float - :param max: upper bound for parameter value - :type max: ``None`` or float - :param vary: whether to vary parameter in fit. + :param min: Lower bound for parameter value. + :type min: ``-np.inf`` or float + :param max: Upper bound for parameter value. + :type max: ``np.inf`` or float + :param vary: Whether to vary parameter in fit. :type vary: boolean - :param expr: mathematical expression for constraint + :param expr: Mathematical expression for constraint. :type expr: string See :ref:`model_param_hints_section`. @@ -339,27 +339,25 @@ specifying one or more independent variables. .. attribute:: independent_vars - list of strings for names of the independent variables. + List of strings for names of the independent variables. .. attribute:: missing - describes what to do for missing values. The choices are + Describes what to do for missing values. The choices are: - * ``None``: Do not check for null or missing values (default) + * ``None``: Do not check for null or missing values (default). * ``'none'``: Do not check for null or missing values. - * ``'drop'``: Drop null or missing observations in data. If pandas is - installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used. - * ``'raise'``: Raise a (more helpful) exception when data contains null - or missing values. + * ``'drop'``: Drop null or missing observations in data. If pandas is installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used. + * ``'raise'``: Raise a (more helpful) exception when data contains null or missing values. .. attribute:: name - name of the model, used only in the string representation of the + Name of the model, used only in the string representation of the model. By default this will be taken from the model function. .. attribute:: opts - extra keyword arguments to pass to model function. Normally this will + Extra keyword arguments to pass to model function. Normally this will be determined internally and should not be changed. .. attribute:: param_hints @@ -368,11 +366,11 @@ specifying one or more independent variables. .. attribute:: param_names - list of strings of parameter names. + List of strings of parameter names. .. attribute:: prefix - prefix used for name-mangling of parameter names. The default is ''. + Prefix used for name-mangling of parameter names. The default is ''. If a particular :class:`Model` has arguments ``amplitude``, ``center``, and ``sigma``, these would become the parameter names. Using a prefix of ``g1_`` would convert these parameter names to @@ -669,7 +667,7 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from .. 
method:: ModelResult.eval(params=None, **kwargs)

-   evaluate the model using parameters supplied (or the best-fit parameters
+   Evaluate the model using parameters supplied (or the best-fit parameters
    if not specified) and supplied independent variables. The ``**kwargs`` arguments can be used to update parameter values and/or independent variables.

 .. method:: ModelResult.eval_components(**kwargs)

-   evaluate each component of a :class:`CompositeModel`, returning an
+   Evaluate each component of a :class:`CompositeModel`, returning an
    ordered dictionary with the values for each component model. The returned dictionary will have keys of the model prefix or (if no prefix is given), the model name. The ``**kwargs`` arguments can be used to

@@ -685,44 +683,43 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. method:: ModelResult.fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])

-   fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
+   Fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
    or ``method``, or changing the independent variable(s) with the ``**kwargs`` argument. See :meth:`Model.fit` for argument descriptions, and note that any value of ``None`` defaults to the last used value.

-.. method:: ModelResult.fit_report(modelpars=None[, show_correl=True[,`< min_correl=0.1]])
+.. method:: ModelResult.fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])

-   return a printable fit report for the fit with fit statistics, best-fit
+   Return a printable fit report for the fit with fit statistics, best-fit
    values with uncertainties and correlations. As with :func:`fit_report`.

-   :param modelpars: Parameters with "Known Values" (optional, default None)
-   :param show_correl: whether to show list of sorted correlations [``True``]
-   :param min_correl: smallest correlation absolute value to show [0.1]
+   :param modelpars: Parameters with "Known Values" (optional, default None).
+   :param show_correl: Whether to show list of sorted correlations [``True``].
+   :param min_correl: Smallest correlation absolute value to show [0.1].

 .. method:: ModelResult.conf_interval(**kwargs)

-   calculate the confidence intervals for the variable parameters using
+   Calculate the confidence intervals for the variable parameters using
    :func:`confidence.conf_interval() `. All keyword arguments are passed to that function. The result is stored in :attr:`ci_out`, and so can be accessed without recalculating them.

 .. method:: ModelResult.ci_report(with_offset=True)

-   return a nicely formatted text report of the confidence intervals, as
+   Return a nicely formatted text report of the confidence intervals, as
    from :func:`ci_report() `.

 .. method:: ModelResult.eval_uncertainty(**kwargs)

-   evaluate the uncertainty of the *model function* from the
+   Evaluate the uncertainty of the *model function* from the
    uncertainties for the best-fit parameters. This can be used to give confidence bands for the model.

    :param params: Parameters, defaults to :attr:`params`.
-   :param sigma: confidence level, i.e., how many :math:`\sigma` values, default=1
-   :returns: ndarray to be added/subtracted to best-fit to give
-       uncertaintay in the values for the model.
+   :param sigma: Confidence level, i.e., how many :math:`\sigma` values, default=1.
+   :returns: ndarray to be added/subtracted to best-fit to give uncertainty in the values for the model.
An example using this method::

@@ -740,35 +737,35 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. method:: ModelResult.plot(datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None)

-   Plot the fit results and residuals using matplotlib, if available. The
+   Plot the fit results and residuals using Matplotlib, if available. The
    plot will include two panels, one showing the fit residual, and the other with the data points, the initial fit curve, and the best-fit curve. If the fit model included weights or if ``yerr`` is specified, errorbars will also be plotted.

-   :param datafmt: matplotlib format string for data curve.
-   :type datafmt: ``None`` or string.
-   :param fitfmt: matplotlib format string for best-fit curve.
-   :type fitfmt: ``None`` or string.
-   :param initfmt: matplotlib format string for initial curve.
-   :type intfmt: ``None`` or string.
-   :param yerr: array of uncertainties for data array.
-   :type yerr: ``None`` or ndarray.
-   :param numpoints: number of points to display
+   :param datafmt: Matplotlib format string for data curve.
+   :type datafmt: ``None`` or string
+   :param fitfmt: Matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string
+   :param initfmt: Matplotlib format string for initial curve.
+   :type initfmt: ``None`` or string
+   :param yerr: Array of uncertainties for data array.
+   :type yerr: ``None`` or ndarray
+   :param numpoints: Number of points to display.
    :type numpoints: ``None`` or integer
-   :param fig: matplotlib Figure to plot on.
+   :param fig: Matplotlib Figure to plot on.
    :type fig: ``None`` or matplotlib.figure.Figure
-   :param data_kws: keyword arguments passed to plot for data curve.
+   :param data_kws: Keyword arguments passed to plot for data curve.
    :type data_kws: ``None`` or dictionary
-   :param fit_kws: keyword arguments passed to plot for best-fit curve.
+   :param fit_kws: Keyword arguments passed to plot for best-fit curve.
    :type fit_kws: ``None`` or dictionary
-   :param init_kws: keyword arguments passed to plot for initial curve.
+   :param init_kws: Keyword arguments passed to plot for initial curve.
    :type init_kws: ``None`` or dictionary
-   :param ax_res_kws: keyword arguments passed to creation of matplotlib axes for the residual plot.
+   :param ax_res_kws: Keyword arguments passed to creation of Matplotlib axes for the residual plot.
    :type ax_res_kws: ``None`` or dictionary
-   :param ax_fit_kws: keyword arguments passed to creation of matplotlib axes for the fit plot.
+   :param ax_fit_kws: Keyword arguments passed to creation of Matplotlib axes for the fit plot.
    :type ax_fit_kws: ``None`` or dictionary
-   :param fig_kws: keyword arguments passed to creation of matplotlib figure.
+   :param fig_kws: Keyword arguments passed to creation of Matplotlib figure.
    :type fig_kws: ``None`` or dictionary
    :returns: matplotlib.figure.Figure

@@ -787,25 +784,25 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

    model included weights or if ``yerr`` is specified, errorbars will also be plotted.

-   :param ax: matplotlib axes to plot on.
-   :type ax: ``None`` or matplotlib.axes.Axes.
-   :param datafmt: matplotlib format string for data curve.
-   :type datafmt: ``None`` or string.
-   :param fitfmt: matplotlib format string for best-fit curve.
-   :type fitfmt: ``None`` or string.
-   :param initfmt: matplotlib format string for initial curve.
-   :type intfmt: ``None`` or string.
- :param yerr: array of uncertainties for data array.
-   :type yerr: ``None`` or ndarray.
-   :param numpoints: number of points to display
+   :param ax: Matplotlib axes to plot on.
+   :type ax: ``None`` or matplotlib.axes.Axes
+   :param datafmt: Matplotlib format string for data curve.
+   :type datafmt: ``None`` or string
+   :param fitfmt: Matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string
+   :param initfmt: Matplotlib format string for initial curve.
+   :type initfmt: ``None`` or string
+   :param yerr: Array of uncertainties for data array.
+   :type yerr: ``None`` or ndarray
+   :param numpoints: Number of points to display.
    :type numpoints: ``None`` or integer
-   :param data_kws: keyword arguments passed to plot for data curve.
+   :param data_kws: Keyword arguments passed to plot for data curve.
    :type data_kws: ``None`` or dictionary
-   :param fit_kws: keyword arguments passed to plot for best-fit curve.
+   :param fit_kws: Keyword arguments passed to plot for best-fit curve.
    :type fit_kws: ``None`` or dictionary
-   :param init_kws: keyword arguments passed to plot for initial curve.
+   :param init_kws: Keyword arguments passed to plot for initial curve.
    :type init_kws: ``None`` or dictionary
-   :param ax_kws: keyword arguments passed to creation of matplotlib axes.
+   :param ax_kws: Keyword arguments passed to creation of Matplotlib axes.
    :type ax_kws: ``None`` or dictionary
    :returns: matplotlib.axes.Axes

@@ -823,19 +820,19 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

    Plot the fit residuals (data - fit) using matplotlib. If ``yerr`` is supplied or if the model included weights, errorbars will also be plotted.

-   :param ax: matplotlib axes to plot on.
-   :type ax: ``None`` or matplotlib.axes.Axes.
-   :param datafmt: matplotlib format string for data curve.
-   :type datafmt: ``None`` or string.
-   :param yerr: array of uncertainties for data array.
-   :type yerr: ``None`` or ndarray.
-   :param numpoints: number of points to display
+   :param ax: Matplotlib axes to plot on.
+   :type ax: ``None`` or matplotlib.axes.Axes
+   :param datafmt: Matplotlib format string for data curve.
+   :type datafmt: ``None`` or string
+   :param yerr: Array of uncertainties for data array.
+   :type yerr: ``None`` or ndarray
+   :param numpoints: Number of points to display.
    :type numpoints: ``None`` or integer
-   :param data_kws: keyword arguments passed to plot for data curve.
+   :param data_kws: Keyword arguments passed to plot for data curve.
    :type data_kws: ``None`` or dictionary
-   :param fit_kws: keyword arguments passed to plot for best-fit curve.
+   :param fit_kws: Keyword arguments passed to plot for best-fit curve.
    :type fit_kws: ``None`` or dictionary
-   :param ax_kws: keyword arguments passed to creation of matplotlib axes.
+   :param ax_kws: Keyword arguments passed to creation of Matplotlib axes.
    :type ax_kws: ``None`` or dictionary
    :returns: matplotlib.axes.Axes

@@ -856,7 +853,7 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. attribute:: aic

-   floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
+   Floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).

 .. attribute:: best_fit

@@ -865,19 +862,19 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. attribute:: best_values

-   dictionary with parameter names as keys, and best-fit values as values.
+   Dictionary with parameter names as keys, and best-fit values as values.

..
attribute:: bic

-   floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
+   Floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).

 .. attribute:: chisqr

-   floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
+   Floating point best-fit chi-square statistic (see :ref:`fit-results-label`).

 .. attribute:: ci_out

-   confidence interval data (see :ref:`confidence_chapter`) or `None` if
+   Confidence interval data (see :ref:`confidence_chapter`) or `None` if
    the confidence intervals have not been calculated.

 .. attribute:: covar

@@ -890,28 +887,28 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. attribute:: errorbars

-   boolean for whether error bars were estimated by fit.
+   Boolean for whether error bars were estimated by fit.

 .. attribute:: ier

-   integer returned code from :scipydoc:`scipy.optimize.leastsq`.
+   Integer return code from :scipydoc:`optimize.leastsq`.

 .. attribute:: init_fit

-   ndarray result of model function, evaluated at provided
+   Ndarray result of model function, evaluated at provided
    independent variables and with initial parameters.

 .. attribute:: init_params

-   initial parameters.
+   Initial parameters.

 .. attribute:: init_values

-   dictionary with parameter names as keys, and initial values as values.
+   Dictionary with parameter names as keys, and initial values as values.

 .. attribute:: iter_cb

-   optional callable function, to be called at each fit iteration. This
+   Optional callable function, to be called at each fit iteration. This
    must take arguments of ``params, iter, resid, *args, **kws``, where ``params`` will have the current parameter values, ``iter`` the iteration, ``resid`` the current residual array, and ``*args`` and

@@ -919,47 +916,47 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. attribute:: jacfcn

-   optional callable function, to be called to calculate jacobian array.
+   Optional callable function, to be called to calculate jacobian array.

 .. attribute:: lmdif_message

-   string message returned from :scipydoc:`scipy.optimize.leastsq`.
+   String message returned from :scipydoc:`optimize.leastsq`.

 .. attribute:: message

-   string message returned from :func:`~lmfit.minimizer.minimize`.
+   String message returned from :func:`~lmfit.minimizer.minimize`.

 .. attribute:: method

-   string naming fitting method for :func:`~lmfit.minimizer.minimize`.
+   String naming fitting method for :func:`~lmfit.minimizer.minimize`.

 .. attribute:: model

-   instance of :class:`Model` used for model.
+   Instance of :class:`Model` used for model.

 .. attribute:: ndata

-   integer number of data points.
+   Integer number of data points.

 .. attribute:: nfev

-   integer number of function evaluations used for fit.
+   Integer number of function evaluations used for fit.

 .. attribute:: nfree

-   integer number of free parameters in fit.
+   Integer number of free parameters in fit.

 .. attribute:: nvarys

-   integer number of independent, freely varying variables in fit.
+   Integer number of independent, freely varying variables in fit.

 .. attribute:: params

-   Parameters used in fit. Will have best-fit values.
+   Parameters used in fit. Will have best-fit values.

 .. attribute:: redchi

-   floating point reduced chi-square statistic (see :ref:`fit-results-label`).
+   Floating point reduced chi-square statistic (see :ref:`fit-results-label`).

..
attribute:: residual

@@ -967,11 +964,11 @@ These methods are all inherited from :class:`~lmfit.minimizer.Minimize` or from

 .. attribute:: scale_covar

-   boolean flag for whether to automatically scale covariance matrix.
+   Boolean flag for whether to automatically scale covariance matrix.

 .. attribute:: success

-   boolean value of whether fit succeeded.
+   Boolean value of whether fit succeeded.

 .. attribute:: weights

@@ -1000,11 +997,11 @@ to model a peak with a background. For such a simple problem, we could just build a model that included both components::

    def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
-        "line + 1-d gaussian"
-        gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
-        line = slope * x + intercept
-        return gauss + line
+       "line + 1-d gaussian"
+       gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+       line = slope * x + intercept
+       return gauss + line

 and use that with::

@@ -1016,8 +1013,8 @@ model function would have to be changed.

 As an alternative we could define a linear function::

    def line(x, slope, intercept):
-        "a line"
-        return slope * x + intercept
+       "a line"
+       return slope * x + intercept

 and build a composite model with just::

@@ -1030,24 +1027,24 @@ This model has parameters for both component models, and can be used as:

 which prints out the results::

    [[Model]]
-    (Model(gaussian) + Model(line))
+     (Model(gaussian) + Model(line))
    [[Fit Statistics]]
-    # function evals = 44
-    # data points = 101
-    # variables = 5
-    chi-square = 2.579
-    reduced chi-square = 0.027
-    Akaike info crit = -360.457
-    Bayesian info crit = -347.381
+     # function evals = 44
+     # data points = 101
+     # variables = 5
+     chi-square = 2.579
+     reduced chi-square = 0.027
+     Akaike info crit = -360.457
+     Bayesian info crit = -347.381
    [[Variables]]
-    amp: 8.45931061 +/- 0.124145 (1.47%) (init= 5)
-    cen: 5.65547872 +/- 0.009176 (0.16%) (init= 5)
-    intercept: -0.96860201 +/- 0.033522 (3.46%) (init= 1)
-    slope: 0.26484403 +/- 0.005748 (2.17%) (init= 0)
-    wid: 0.67545523 +/- 0.009916 (1.47%) (init= 1)
+     amp: 8.45931061 +/- 0.124145 (1.47%) (init= 5)
+     cen: 5.65547872 +/- 0.009176 (0.16%) (init= 5)
+     intercept: -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+     slope: 0.26484403 +/- 0.005748 (2.17%) (init= 0)
+     wid: 0.67545523 +/- 0.009916 (1.47%) (init= 1)
    [[Correlations]] (unreported correlations are < 0.100)
-    C(amp, wid) = 0.666
-    C(cen, intercept) = 0.129
+     C(amp, wid) = 0.666
+     C(cen, intercept) = 0.129

 and shows the plot on the left.

@@ -1098,11 +1095,11 @@ provides a simple way to build up complex models.

    binary operator (`op`). Additional keywords are passed to :class:`Model`.

-   :param left: left-hand side Model
+   :param left: Left-hand side Model.
    :type left: :class:`Model`
-   :param right: right-hand side Model
+   :param right: Right-hand side Model.
    :type right: :class:`Model`
-   :param op: binary operator
+   :param op: Binary operator.
    :type op: callable, and taking 2 arguments (`left` and `right`).
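As a minimal sketch of the equivalence (the trivial model functions ``f1`` and ``f2`` here are purely illustrative), composing two models with ``+`` builds the same object as constructing the :class:`CompositeModel` explicitly::

    import operator
    from lmfit import Model
    from lmfit.model import CompositeModel

    def f1(x, a):
        return a * x

    def f2(x, b):
        return x + b

    # the '+' operator on two Models creates a CompositeModel using operator.add
    mod1 = Model(f1) + Model(f2)
    mod2 = CompositeModel(Model(f1), Model(f2), operator.add)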
Normally, one does not have to explicitly create a :class:`CompositeModel`, @@ -1124,13 +1121,13 @@ convolution function, perhaps as:: import numpy as np def convolve(dat, kernel): - # simple convolution - npts = min(len(dat), len(kernel)) - pad = np.ones(npts) - tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1])) - out = np.convolve(tmp, kernel, mode='valid') - noff = int((len(out) - npts)/2) - return (out[noff:])[:npts] + # simple convolution + npts = min(len(dat), len(kernel)) + pad = np.ones(npts) + tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1])) + out = np.convolve(tmp, kernel, mode='valid') + noff = int((len(out) - npts)/2) + return (out[noff:])[:npts] which extends the data in both directions so that the convolving kernel function gives a valid result over the data range. Because this function @@ -1142,23 +1139,23 @@ binary operator. A full script using this technique is here: which prints out the results:: [[Model]] - (Model(jump) Model(gaussian)) + (Model(jump) Model(gaussian)) [[Fit Statistics]] - # function evals = 27 - # data points = 201 - # variables = 3 - chi-square = 22.091 - reduced chi-square = 0.112 - Akaike info crit = -437.837 - Bayesian info crit = -427.927 + # function evals = 27 + # data points = 201 + # variables = 3 + chi-square = 22.091 + reduced chi-square = 0.112 + Akaike info crit = -437.837 + Bayesian info crit = -427.927 [[Variables]] - mid: 5 (fixed) - sigma: 0.64118585 +/- 0.013233 (2.06%) (init= 1.5) - center: 4.51633608 +/- 0.009567 (0.21%) (init= 3.5) - amplitude: 0.62654849 +/- 0.001813 (0.29%) (init= 1) + mid: 5 (fixed) + sigma: 0.64118585 +/- 0.013233 (2.06%) (init= 1.5) + center: 4.51633608 +/- 0.009567 (0.21%) (init= 3.5) + amplitude: 0.62654849 +/- 0.001813 (0.29%) (init= 1) [[Correlations]] (unreported correlations are < 0.100) - C(center, amplitude) = 0.344 - C(sigma, amplitude) = 0.280 + C(center, amplitude) = 0.344 + C(sigma, amplitude) = 0.280 and shows the plots: diff --git a/doc/parameters.rst b/doc/parameters.rst index 09ed224b6..33a8c5e66 100644 --- a/doc/parameters.rst +++ b/doc/parameters.rst @@ -12,7 +12,7 @@ of lmfit. A :class:`Parameter` is the quantity to be optimized in all minimization problems, replacing the plain floating point number used in the optimization routines from :mod:`scipy.optimize`. A :class:`Parameter` has -a value that can be varied in the fit or have a fixed value, have upper +a value that can be varied in the fit or a fixed value, and can have upper and/or lower bounds. It can even have a value that is constrained by an algebraic expression of other Parameter values. Since :class:`Parameters` live outside the core optimization routines, they can be used in **all** @@ -42,19 +42,20 @@ Finally, the objective functions you write for lmfit will take an instance of The :class:`Parameter` class ======================================== -.. class:: Parameter(name=None[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]]) +.. class:: Parameter(name=None[, value=None[, vary=True[, min=-np.inf[, max=np.inf[, expr=None[, brute_step=None]]]]]]) create a Parameter object. - :param name: parameter name + :param name: Parameter name. :type name: ``None`` or string -- will be overwritten during fit if ``None``. - :param value: the numerical value for the parameter - :param vary: whether to vary the parameter or not. + :param value: The numerical value for the parameter. + :param vary: Whether to vary the parameter or not. 
:type vary: boolean (``True``/``False``) [default ``True``] - :param min: lower bound for value (``None`` = no lower bound). - :param max: upper bound for value (``None`` = no upper bound). - :param expr: mathematical expression to use to evaluate value during fit. + :param min: Lower bound for value (``-np.inf`` = no lower bound). + :param max: Upper bound for value (``np.inf`` = no upper bound). + :param expr: Mathematical expression to use to evaluate value during fit. :type expr: ``None`` or string + :param brute_step: Step size for grid points in brute force method (``0`` = no step size). Each of these inputs is turned into an attribute of the same name. @@ -65,11 +66,11 @@ The :class:`Parameter` class .. attribute:: stderr - the estimated standard error for the best-fit value. + The estimated standard error for the best-fit value. .. attribute:: correl - a dictionary of the correlation with the other fitted variables in the + A dictionary of the correlation with the other fitted variables in the fit, of the form:: {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102} @@ -84,50 +85,59 @@ The :class:`Parameter` class .. index:: Removing a Constraint Expression - .. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]]) + .. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None[, brute_step=None]]]]]) - set or update a Parameters value or other attributes. + set or update a Parameter value or other attributes. - :param name: parameter name - :param value: the numerical value for the parameter - :param vary: whether to vary the parameter or not. - :param min: lower bound for value - :param max: upper bound for value - :param expr: mathematical expression to use to evaluate value during fit. + :param name: Parameter name. + :param value: The numerical value for the parameter. + :param vary: Whether to vary the parameter or not. + :param min: Lower bound for value. + :param max: Upper bound for value. + :param expr: Mathematical expression to use to evaluate value during fit. + :param brute_step: Step size for grid points in brute force method. Each argument of :meth:`set` has a default value of ``None``, and will be set only if the provided value is not ``None``. You can use this to update some Parameter attribute without affecting others, for example:: - p1 = Parameter('a', value=2.0) - p2 = Parameter('b', value=0.0) - p1.set(min=0) - p2.set(vary=False) + p1 = Parameter('a', value=2.0) + p2 = Parameter('b', value=0.0) + p1.set(min=0) + p2.set(vary=False) to set a lower bound, or to set a Parameter as have a fixed value. Note that to use this approach to lift a lower or upper bound, doing:: - p1.set(min=0) - ..... - # now lift the lower bound - p1.set(min=None) # won't work! lower bound NOT changed + p1.set(min=0) + ..... + # now lift the lower bound + p1.set(min=None) # won't work! lower bound NOT changed won't work -- this will not change the current lower bound. Instead you'll have to use ``np.inf`` to remove a lower or upper bound:: - # now lift the lower bound - p1.set(min=-np.inf) # will work! + # now lift the lower bound + p1.set(min=-np.inf) # will work! Similarly, to clear an expression of a parameter, you need to pass an empty string, not ``None``. You also need to give a value and explicitly tell it to vary:: - p3 = Parameter('c', expr='(a+b)/2') - p3.set(expr=None) # won't work! expression NOT changed + p3 = Parameter('c', expr='(a+b)/2') + p3.set(expr=None) # won't work! 
expression NOT changed

-      # remove constraint expression
-      p3.set(value=1.0, vary=True, expr='')   # will work! parameter now unconstrained
+      # remove constraint expression
+      p3.set(value=1.0, vary=True, expr='')   # will work! parameter now unconstrained
+
+   Finally, to clear the step size, you need to pass ``0`` (zero), not ``None``::
+
+      p4 = Parameter('d', value=5.0, brute_step=0.1)
+      p4.set(brute_step=None)   # won't work! step size NOT changed
+
+      # remove step size
+      p4.set(brute_step=0)   # will work! parameter does not have a step size defined


The :class:`Parameters` class
@@ -135,7 +145,7 @@ The :class:`Parameters` class

.. class:: Parameters()

-   create a Parameters object. This is little more than a fancy ordered
+   Create a Parameters object. This is little more than a fancy ordered
   dictionary, with the restrictions that:

   1. keys must be valid Python symbol names, so that they can be used in
@@ -147,41 +157,41 @@ The :class:`Parameters` class
Two methods are provided for convenient initialization of a :class:`Parameters`,
and one for extracting :class:`Parameter` values into a plain dictionary.

-   .. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
+   .. method:: add(name[, value=None[, vary=True[, min=-np.inf[, max=np.inf[, expr=None[, brute_step=None]]]]]])

-      add a named parameter. This creates a :class:`Parameter`
+      Add a named parameter. This creates a :class:`Parameter`
      object associated with the key `name`, with optional arguments
      passed to :class:`Parameter`::

-        p = Parameters()
-        p.add('myvar', value=1, vary=True)
+        p = Parameters()
+        p.add('myvar', value=1, vary=True)

   .. method:: add_many(self, paramlist)

-      add a list of named parameters. Each entry must be a tuple
+      Add a list of named parameters. Each entry must be a tuple
      with the following entries::

-        name, value, vary, min, max, expr
+        name, value, vary, min, max, expr, brute_step

      This method is somewhat rigid and verbose (no default values), but can
      be useful when initially defining a parameter list so that it looks
      table-like::

-        p = Parameters()
-        # (Name,  Value,  Vary,   Min,  Max,  Expr)
-        p.add_many(('amp1',    10,  True, None, None,  None),
-                   ('cen1',   1.2,  True,  0.5,  2.0,  None),
-                   ('wid1',   0.8,  True,  0.1, None,  None),
-                   ('amp2',   7.5,  True, None, None,  None),
-                   ('cen2',   1.9,  True,  1.0,  3.0,  None),
-                   ('wid2',  None, False, None, None, '2*wid1/3'))
+        p = Parameters()
+        # (Name,  Value,  Vary,   Min,  Max,  Expr,  Brute_step)
+        p.add_many(('amp1',    10,  True, None, None,  None,  None),
+                   ('cen1',   1.2,  True,  0.5,  2.0,  None,  None),
+                   ('wid1',   0.8,  True,  0.1, None,  None,  None),
+                   ('amp2',   7.5,  True, None, None,  None,  None),
+                   ('cen2',   1.9,  True,  1.0,  3.0,  None,   0.1),
+                   ('wid2',  None, False, None, None, '2*wid1/3',  None))

   .. automethod:: Parameters.pretty_print

   .. method:: valuesdict()

-      return an ordered dictionary of name:value pairs with the
+      Return an ordered dictionary of name:value pairs with the
      Parameter name as the key and Parameter value as value.

      This is distinct from the :class:`Parameters` itself, as the dictionary
@@ -191,7 +201,7 @@ The :class:`Parameters` class

   .. method:: dumps(**kws)

-      return a JSON string representation of the :class:`Parameter` object.
+      Return a JSON string representation of the :class:`Parameter` object.
      This can be saved or used to re-create or re-set parameters, using the
      :meth:`loads` method.

@@ -199,19 +209,19 @@ The :class:`Parameters` class

   ..
method:: dump(file, **kws)

-      write a JSON representation of the :class:`Parameter` object to a file
+      Write a JSON representation of the :class:`Parameter` object to a file
      or file-like object in `file` -- really any object with a :meth:`write`
      method. Optional keywords are sent to :py:func:`json.dumps`.

   .. method:: loads(sval, **kws)

-      use a JSON string representation of the :class:`Parameter` object in
+      Use a JSON string representation of the :class:`Parameter` object in
      `sval` to set all parameter settings. Optional keywords are sent to
      :py:func:`json.loads`.

   .. method:: load(file, **kws)

-      read and use a JSON string representation of the :class:`Parameter`
+      Read and use a JSON string representation of the :class:`Parameter`
      object from a file or file-like object in `file` -- really any object
      with a :meth:`read` method. Optional keywords are sent to
      :py:func:`json.loads`.
@@ -231,10 +241,10 @@ can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
which would make the objective function ``fcn2min`` above look like::

    def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        v = params.valuesdict()
+        """ model decaying sine wave, subtract data"""
+        v = params.valuesdict()

-        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
-        return model - data
+        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+        return model - data

The results are identical, and the difference is a stylistic choice.
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 8556d2a58..45257a1c2 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -79,7 +79,7 @@ def conf_interval(minimizer, result, p_names=None, sigmas=(1, 2, 3),
        Function to calculate the probability from the optimized chi-square.
        Default (``None``) uses built-in f_compare (F test).
    verbose: bool
-        print extra debuging information. Default is ``False``.
+        Print extra debugging information. Default is ``False``.

    Returns
@@ -363,7 +363,7 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
    y : (ny)-array
        y-coordinates
    grid : (nx,ny)-array
-        grid contains the calculated probabilities.
+        Grid containing the calculated probabilities.

    Examples
    --------
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index fb74ea01a..0cabd0057 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -186,7 +186,7 @@ class MinimizerResult(object):
    This is a plain container (with no methods of its own)
    that simply holds the results of the minimization.
    Fit results include data such as status and error messages, fit statistics,
-    and the updated (i.e. best-fit) parameters themselves :attr:`params`.
+    and the updated (i.e., best-fit) parameters themselves in :attr:`params`.

    The list of (possible) `MinimizerResult` attributes follows.
@@ -202,14 +202,14 @@ class MinimizerResult(object):
        useful for understanding the values in :attr:`init_vals` and
        :attr:`covar`.
    covar : numpy.ndarray
-        covariance matrix from minimization (`leastsq` only), with
+        Covariance matrix from minimization (`leastsq` only), with
        rows/columns using :attr:`var_names`.
    init_vals : list
        List of initial values for variable parameters using :attr:`var_names`.
    init_values : dict
        Dictionary of initial values for variable parameters.
    nfev : int
-        Number of function evaluations
+        Number of function evaluations.
    success : bool
        Boolean (``True``/``False``) for whether fit succeeded.
errorbars : bool @@ -223,17 +223,17 @@ class MinimizerResult(object): nvarys : int Number of variables in fit :math:`N_{\\rm varys}`. ndata : int - Number of data points: :math:`N` + Number of data points: :math:`N`. nfree : int - Degrees of freedom in fit: :math:`N - N_{\\rm varys}` + Degrees of freedom in fit: :math:`N - N_{\\rm varys}`. residual : numpy.ndarray Residual array :math:`{\\rm Resid_i}`. Return value of the objective function. chisqr : float - Chi-square: :math:`\chi^2 = \sum_i^N [{\\rm Resid}_i]^2` + Chi-square: :math:`\chi^2 = \sum_i^N [{\\rm Resid}_i]^2`. redchi : float Reduced chi-square: - :math:`\chi^2_{\\nu}= {\chi^2} / {(N - N_{\\rm varys})}` + :math:`\chi^2_{\\nu}= {\chi^2} / {(N - N_{\\rm varys})}`. aic : float Akaike Information Criterion statistic. bic : float @@ -288,21 +288,21 @@ def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None, Parameters ---------- userfcn : callable - objective function that returns the residual (difference between + Objective function that returns the residual (difference between model and data) to be minimized in a least squares sense. The function must have the signature: `userfcn(params, *fcn_args, **fcn_kws)` params : :class:`lmfit.parameter.Parameters` object. - contains the Parameters for the model. + Contains the Parameters for the model. fcn_args : tuple, optional - positional arguments to pass to `userfcn`. + Positional arguments to pass to `userfcn`. fcn_kws : dict, optional - keyword arguments to pass to `userfcn`. + Keyword arguments to pass to `userfcn`. iter_cb : callable, optional Function to be called at each fit iteration. This function should have the signature: `iter_cb(params, iter, resid, *fcn_args, **fcn_kws)`, - where where `params` will have the current parameter values, `iter` + where `params` will have the current parameter values, `iter` the iteration, `resid` the current residual array, and `*fcn_args` and `**fcn_kws` as passed to the objective function. scale_covar : bool, optional @@ -315,7 +315,7 @@ def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None, - 'propagate' - the values returned from `userfcn` are un-altered - 'omit' - the non-finite values are filtered. reduce_fcn : str or callable, optional - function to convert a residual array to a scalar value for the scalar + Function to convert a residual array to a scalar value for the scalar minimizers. Optional values are (where `r` is the residual array): - None : sum of squares of residual [default] (r*r).sum() @@ -341,7 +341,7 @@ def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None, the objective function returns non-finite values then a `ValueError` will be raised because the underlying solvers cannot deal with them. - A common use for the `fcn_args` and `fcn_kwds` would be to pass in + A common use for the `fcn_args` and `fcn_kws` would be to pass in other data needed to calculate the residual, including such things as the data array, dependent variable, uncertainties in the data, and other data structures for the model calculation. @@ -591,10 +591,10 @@ def unprepare_fit(self): def scalar_minimize(self, method='Nelder-Mead', params=None, **kws): """ - Scalar minimization using :scipydoc:`scipy.optimize.minimize`. + Scalar minimization using :scipydoc:`optimize.minimize`. Perform fit with any of the scalar minimization algorithms supported by - :scipydoc:`scipy.optimize.minimize`. Default argument values are: + :scipydoc:`optimize.minimize`. 
Default argument values are:

        +-------------------------+-----------------+-----------------------------------------------------+
        | :meth:`scalar_minimize` | Default Value   | Description                                         |
        +-------------------------+-----------------+-----------------------------------------------------+
@@ -629,7 +629,7 @@ def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
        params : Parameters, optional
            Parameters to use as starting points.
        kws : dict, optional
-            Minimizer options pass to :scipydoc:`scipy.optimize.minimize`.
+            Minimizer options to pass to :scipydoc:`optimize.minimize`.

        Returns
        -------
@@ -651,7 +651,7 @@ def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
        for any of these methods, so are not supported separately
        for those designed to use bounds. However, if you use the
        differential_evolution method you must specify finite
-        (min, max) for each Parameter.
+        (min, max) for each varying Parameter.
        """
        result = self.prepare_fit(params=params)
@@ -1093,7 +1093,7 @@ def least_squares(self, params=None, **kws):
        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

-        This method wraps :scipydoc:`scipy.optimize.least_squares`, which
+        This method wraps :scipydoc:`optimize.least_squares`, which
        has inbuilt support for bounds and robust loss functions.

        Parameters
        ----------
        params : :class:`lmfit.parameter.Parameters`, optional
            Parameters to use as starting points.
        kws : dict, optional
            Minimizer options to pass to
-            :scipydoc:`scipy.optimize.least_squares`.
+            :scipydoc:`optimize.least_squares`.

        Returns
        -------
@@ -1162,7 +1162,7 @@ def leastsq(self, params=None, **kws):
        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

-        This method calls :scipydoc:`scipy.optimize.leastsq`.
+        This method calls :scipydoc:`optimize.leastsq`.
        By default, numerical derivatives are used, and the following
        arguments are set:

        +------------------+----------------+------------------------------------------------------------+
        | ftol             | 1.e-7          | Relative error in the desired sum of squares               |
        +------------------+----------------+------------------------------------------------------------+
-        | maxfev           | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
+        | maxfev           | 2000*(nvar+1)  | Maximum number of function calls (nvar= # of variables)    |
        +------------------+----------------+------------------------------------------------------------+
-        | Dfun             | ``None``       | function to call for Jacobian calculation                  |
+        | Dfun             | ``None``       | Function to call for Jacobian calculation                  |
        +------------------+----------------+------------------------------------------------------------+

        Parameters
        ----------
        params : :class:`lmfit.parameter.Parameters`, optional
            Parameters to use as starting point.
        kws : dict, optional
-            Minimizer options to pass to :scipydoc:`scipy.optimize.leastsq`.
+            Minimizer options to pass to :scipydoc:`optimize.leastsq`.

        Returns
        -------
@@ -1332,8 +1332,20 @@ def leastsq(self, params=None, **kws):

    def brute(self, params=None, Ns=20, keep=50):
        """
-        Use the ``brute force`` method, :scipydoc:`scipy.optimize.brute`,
-        to find the global minimum of a function.
+        Use the `brute` force method (:scipydoc:`optimize.brute`) to find the
+        global minimum of a function.
The following parameters are passed to
+        :scipydoc:`optimize.brute` and cannot be changed:
+
+        +-------------------+-------+-----------------------------------------------------------------------+
+        | :meth:`brute` arg | Value | Description                                                           |
+        +===================+=======+=======================================================================+
+        | full_output       | 1     | Return the evaluation grid and the objective function's values on it.|
+        +-------------------+-------+-----------------------------------------------------------------------+
+        | finish            | None  | No "polishing" function is to be used after the grid search.         |
+        +-------------------+-------+-----------------------------------------------------------------------+
+        | disp              | False | Do not print convergence messages (when finish is not None).         |
+        +-------------------+-------+-----------------------------------------------------------------------+

        It assumes that the input Parameters have been initialized, and a
        function to minimize has been properly set up.
@@ -1345,30 +1357,53 @@ def brute(self, params=None, Ns=20, keep=50):
            Parameters used to initialize the Minimizer object are used.
        Ns : int, optional
            Number of grid points along the axes, if not otherwise specified
+            (see Notes).
        keep : int, optional
            Number of best candidates from the brute force method that are
-            stored in the .candidates attribute. If 'all', then all grid
-            points from brute are stored as candidates.
-
-        The following, default, minimizer options are passed to
-        :scipydoc:`scipy.optimize.brute` and cannot be changed:
-        full_output=1, finish=None, disp=False
+            stored in the `candidates` attribute. If 'all', then all grid
+            points from `brute` are stored as candidates.

        Returns
        -------
        :class:`MinimizerResult`
            Object containing the parameters from the brute force method.
-            The return values (x0, fval, grid, Jout) from
-            `scipy.optimize.brute` are stored as ``brute_`` attributes.
-            The `MinimizerResult` also contains the ``candidates`` and
-            ``show_candidates`` attributes. The ``candidates`` attribute
+            The return values (`x0`, `fval`, `grid`, `Jout`) from
+            :scipydoc:`optimize.brute` are stored as `brute_` attributes.
+            The `MinimizerResult` also contains the `candidates` and
+            `show_candidates` attributes. The `candidates` attribute
            contains the parameters and chisqr from the brute force method as
            a namedtuple, ('Candidate', ['params', 'score']), sorted on the
            (lowest) chisqr value. To access the values for a particular
-            candidate use `result.candidate[#].params` or
+            candidate one can use `result.candidate[#].params` or
            `result.candidate[#].score`, where a lower # represents a better
-            candidate. The ``show_candidates`` attribute, will show the
-            candidates using the `pretty_print` method.
+            candidate. The `show_candidates` attribute will show the
+            candidates using the :meth:`pretty_print` method.
+
+
+        .. versionadded:: 0.9.6
+
+
+        Notes
+        -----
+        The :meth:`brute` method evaluates the function at each point of a
+        multidimensional grid of points. The grid points are generated from the
+        parameter ranges using `Ns` and (optional) `brute_step`.
+        The implementation in :scipydoc:`optimize.brute` requires finite bounds
+        and the `range` is specified as a two-tuple `(min, max)` or slice-object
+        `(min, max, brute_step)`.
A slice-object is used directly, whereas a
+        two-tuple is converted to a slice-object that interpolates `Ns` points
+        from `min` to `max`, inclusive.
+
+        In addition, the :meth:`brute` method in lmfit handles three other
+        scenarios, given below with their respective slice-object (see the
+        illustrative sketch following this hunk):
+
+        - lower bound (:attr:`min`) and :attr:`brute_step` are specified:
+            range = (min, min + Ns*brute_step, brute_step).
+        - upper bound (:attr:`max`) and :attr:`brute_step` are specified:
+            range = (max - Ns*brute_step, max, brute_step).
+        - numerical value (:attr:`value`) and :attr:`brute_step` are specified:
+            range = (value - (Ns//2)*brute_step, value + (Ns//2)*brute_step, brute_step).
+
        """
        result = self.prepare_fit(params=params)
        result.method = 'brute'
@@ -1521,7 +1556,7 @@ def _lnprior(theta, bounds):
    Parameters
    ----------
    theta : sequence
-        float parameter values (only those being varied)
+        Float parameter values (only those being varied).
    bounds : np.ndarray
        Lower and upper bounds of parameters that are varying.
        Has shape (nvarys, 2).
@@ -1548,7 +1583,7 @@ def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
    Parameters
    ----------
    theta : sequence
-        float parameter values (only those being varied)
+        Float parameter values (only those being varied).
    userfcn : callable
        User objective function
    params : lmfit.Parameters
@@ -1725,15 +1760,15 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
    Parameters
    ----------
    fcn : callable
-        objective function to be minimized. When method is `leastsq` or
+        Objective function to be minimized. When method is `leastsq` or
        `least_squares`, the objective function should return an array
        of residuals (difference between model and data) to be minimized
        in a least squares sense. With the scalar methods the objective
        function can either return the residuals array or a single scalar
        value. The function must have the signature:
        `fcn(params, *args, **kws)`
-    params : :class:`lmfit.parameter.Parameters` object.
-        contains the Parameters for the model.
+    params : :class:`lmfit.parameter.Parameters` object
+        Contains the Parameters for the model.
    method : str, optional
        Name of the fitting method to use. Valid values are:
@@ -1752,6 +1787,8 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
        - `'dogleg'`: Dogleg
        - `'slsqp'`: Sequential Least Squares Programming
        - `'differential_evolution'`: differential evolution
+        - `'brute'`: brute force method.
+          Uses `scipy.optimize.brute`.

        For more details on the fitting methods please refer to the
        `scipy docs <https://docs.scipy.org/doc/scipy/reference/optimize.html>`__.

    args : tuple, optional
        Positional arguments to pass to `fcn`.
    kws : dict, optional
-        keyword arguments to pass to `fcn`.
+        Keyword arguments to pass to `fcn`.
    iter_cb : callable, optional
        Function to be called at each fit iteration. This function should
        have the signature `iter_cb(params, iter, resid, *args, **kws)`,
@@ -1769,7 +1806,7 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
    scale_covar : bool, optional
        Whether to automatically scale the covariance matrix (leastsq only).
    reduce_fcn : str or callable, optional
-        function to convert a residual array to a scalar value for the scalar
+        Function to convert a residual array to a scalar value for the scalar
        minimizers. See notes in `Minimizer`.
    fit_kws : dict, optional
        Options to pass to the minimizer being used.
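The three range-construction scenarios above can be made concrete with a small
sketch. The helper below is illustrative only -- the name `brute_range` and the
standalone function are not part of lmfit's API -- and it assumes a
Parameter-like object with `value`, `min`, `max`, and `brute_step` attributes::

    import numpy as np
    from lmfit import Parameter

    def brute_range(par, Ns=20):
        """Illustrative helper mirroring the range scenarios above."""
        if par.brute_step is None:
            # finite (min, max) required; for a plain two-tuple,
            # scipy.optimize.brute interpolates Ns points from min to max
            return (par.min, par.max)
        if np.isfinite(par.min) and np.isfinite(par.max):
            # both bounds and a step size: use them directly
            return slice(par.min, par.max, par.brute_step)
        if np.isfinite(par.min):
            # lower bound and step size
            return slice(par.min, par.min + Ns*par.brute_step, par.brute_step)
        if np.isfinite(par.max):
            # upper bound and step size
            return slice(par.max - Ns*par.brute_step, par.max, par.brute_step)
        # only a value and a step size: center the grid on the value
        return slice(par.value - (Ns//2)*par.brute_step,
                     par.value + (Ns//2)*par.brute_step, par.brute_step)

    print(brute_range(Parameter('a', value=2.0, brute_step=0.25)))
    # -> slice(-0.5, 4.5, 0.25), i.e. 20 grid points centered on the value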
@@ -1794,12 +1831,12 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
    squares of the array will be sent to the underlying fitting method,
    effectively doing a least-squares optimization of the return values.

-    A common use for `args` and `kwds` would be to pass in other
+    A common use for `args` and `kws` would be to pass in other
    data needed to calculate the residual, including such things as the
    data array, dependent variable, uncertainties in the data, and other
    data structures for the model calculation.

-    On output, the params will be unchanged. The best-fit values, and where
+    On output, `params` will be unchanged. The best-fit values, and where
    appropriate, estimated uncertainties and correlations, will all be
    contained in the returned :class:`MinimizerResult`. See
    :ref:`fit-results-label` for further details.
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index b0c88a52f..6256b6ecf 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -26,7 +26,7 @@ def isclose(x, y, rtol=1e-5, atol=1e-8):
    The truth whether two numbers are the same, within an absolute and
    relative tolerance.

-    i.e. abs(`x` - `y`) <= (`atol` + `rtol` * absolute(`y`))
+    i.e., abs(`x` - `y`) <= (`atol` + `rtol` * absolute(`y`))

    Parameters
    ----------
@@ -244,15 +244,15 @@ def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
    oneline : boolean
        If True prints a one-line parameters representation. Default False.
    colwidth : int
-        column width for all except the first (i.e. name) column.
-    columns : list of strings
-        list of columns names to print. All values must be valid
-        :class:`Parameter` attributes.
+        Column width for all except the first (i.e. name) column.
+    precision : int
+        Number of digits to be printed after floating point.
    fmt : string
-        single-char numeric formatter. Valid values: 'f' floating point,
+        Single-char numeric formatter. Valid values: 'f' floating point,
        'g' floating point and exponential, 'e' exponential.
-    precision : int
-        number of digits to be printed after floating point.
+    columns : list of strings
+        List of column names to print. All values must be valid
+        :class:`Parameter` attributes.
    """
    if oneline:
        print(self.pretty_repr(oneline=oneline))
@@ -436,9 +436,9 @@ class Parameter(object):
    vary : bool
        Whether the Parameter is varied during a fit.
    min : float
-        Lower bound for value (None or -inf means no lower bound).
+        Lower bound for value (-np.inf means no lower bound).
    max : float
-        Upper bound for value (None or inf means no upper bound).
+        Upper bound for value (np.inf means no upper bound).
    expr : str
        An expression specifying constraints for the parameter.
    stderr : float
@@ -459,9 +459,9 @@ def __init__(self, name=None, value=None, vary=True,
    vary : bool, optional
        Whether the Parameter is varied during a fit.
    min : float, optional
-        Lower bound for value (None or -inf means no lower bound).
+        Lower bound for value (-np.inf means no lower bound).
    max : float, optional
-        Upper bound for value (None or inf means no upper bound).
+        Upper bound for value (np.inf means no upper bound).
    expr : str, optional
        Mathematical expression used to constrain the value during the fit.
    brute_step : float, optional
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index 896d1c4de..f656d3413 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -178,18 +178,18 @@ def report_fit(params, **kws):

 def ci_report(ci, with_offset=True, ndigits=5):
-    """return text of a report for confidence intervals
+    """Return text of a report for confidence intervals.
Parameters
    ----------
    with_offset : bool (default `True`)
-        whether to subtract best value from all other values.
+        Whether to subtract best value from all other values.
    ndigits : int (default 5)
-        number of significant digits to show
+        Number of significant digits to show.

    Returns
    -------
-    text of formatted report on confidence intervals.
+    Text of formatted report on confidence intervals.
    """
    maxlen = max([len(i) for i in ci])
    buff = []

From a3be333a8d40f3cf0ec851872b203198ae460496 Mon Sep 17 00:00:00 2001
From: reneeotten
Date: Sat, 4 Feb 2017 22:31:22 -0500
Subject: [PATCH 6/6] Update show_candidates() in the MinimizerResult class.

You can now select which candidate to show; if no valid number is
specified it will show all stored candidates. In order to get this to
work, removed the @property decorator.
---
 lmfit/minimizer.py | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 0cabd0057..421257cd8 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -259,16 +259,20 @@ def flatchain(self):
        else:
            return None

-    @property
-    def show_candidates(self):
+    def show_candidates(self, candidate_nmb='all'):
        """
        A pretty_print() representation of the candidates from the brute force
-        method.
+        method, showing all candidates (default) or the specified candidate-#.
        """
        if hasattr(self, 'candidates'):
-            for i, candidate in enumerate(self.candidates):
-                print("\nCandidate # {}, chisqr = {:.3f}".format(i, candidate.score))
+            try:
+                candidate = self.candidates[candidate_nmb]
+                print("\nCandidate #{}, chisqr = {:.3f}".format(candidate_nmb, candidate.score))
                 candidate.params.pretty_print()
+            except (IndexError, TypeError):
+                for i, candidate in enumerate(self.candidates):
+                    print("\nCandidate #{}, chisqr = {:.3f}".format(i, candidate.score))
+                    candidate.params.pretty_print()

 class Minimizer(object):
    """A general minimizer for curve fitting and optimization.
@@ -1360,8 +1364,8 @@ def brute(self, params=None, Ns=20, keep=50):
            (see Notes).
        keep : int, optional
            Number of best candidates from the brute force method that are
-            stored in the `candidates` attribute. If 'all', then all grid
-            points from `brute` are stored as candidates.
+            stored in the :attr:`candidates` attribute. If 'all', then all grid
+            points from :scipydoc:`optimize.brute` are stored as candidates.

        Returns
        -------
@@ -1369,15 +1373,15 @@ def brute(self, params=None, Ns=20, keep=50):
            Object containing the parameters from the brute force method.
            The return values (`x0`, `fval`, `grid`, `Jout`) from
            :scipydoc:`optimize.brute` are stored as `brute_` attributes.
-            The `MinimizerResult` also contains the `candidates` and
-            `show_candidates` attributes. The `candidates` attribute
-            contains the parameters and chisqr from the brute force method as
-            a namedtuple, ('Candidate', ['params', 'score']), sorted on the
-            (lowest) chisqr value. To access the values for a particular
-            candidate one can use `result.candidate[#].params` or
-            `result.candidate[#].score`, where a lower # represents a better
-            candidate. The `show_candidates` attribute will show the
-            candidates using the :meth:`pretty_print` method.
+            The `MinimizerResult` also contains the `candidates` attribute and
+            `show_candidates()` method. The `candidates` attribute contains the
+            parameters and chisqr from the brute force method as a namedtuple,
+            ('Candidate', ['params', 'score']), sorted on the (lowest) chisqr
+            value.
To access the values for a particular candidate one can use
+            `result.candidate[#].params` or `result.candidate[#].score`, where
+            a lower # represents a better candidate. The `show_candidates(#)`
+            method uses :meth:`pretty_print` to show a specific candidate-#
+            or all candidates when no number is specified.


        .. versionadded:: 0.9.6
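As a usage sketch of the updated method (the objective function, data, and
parameter settings below are invented for illustration; only `Minimizer.brute`
and `show_candidates` come from this patch series)::

    import numpy as np
    import lmfit

    def residual(params, x, data):
        # toy quadratic model, for illustration only
        return data - (params['a'].value*x**2 + params['b'].value)

    x = np.linspace(-2, 2, 51)
    data = 3.0*x**2 + 1.0

    params = lmfit.Parameters()
    params.add('a', value=1.0, min=0.0, max=10.0, brute_step=0.5)
    params.add('b', value=0.0, min=-5.0, max=5.0, brute_step=0.5)

    fitter = lmfit.Minimizer(residual, params, fcn_args=(x, data))
    result = fitter.brute(keep=10)

    result.show_candidates()   # pretty-print all stored candidates
    result.show_candidates(0)  # pretty-print only the best (lowest chisqr) candidate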