
ENH: move the retall parameter to an option in minimize

1 parent 54ee3ad · commit 4208897b74371daeceb32a48d77f1a2eb7f20d94 · dlax committed Apr 18, 2012
Showing with 74 additions and 59 deletions.
  1. +28 −15 scipy/optimize/_minimize.py
  2. +25 −23 scipy/optimize/optimize.py
  3. +21 −21 scipy/optimize/tests/test_optimize.py
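
For callers, the change looks like this. A minimal sketch of the new
convention (assuming this development revision, where `minimize` still
returns an `(x, info)` pair, as the tests below do, and records the iterate
history under `info['allvecs']`):

    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der

    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])

    # Before this commit, the iterate history was requested via a keyword:
    #   x, info = minimize(rosen, x0, method='BFGS', jac=rosen_der, retall=True)
    # After it, the flag lives in the per-method options dict instead:
    x, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
                       options={'return_all': True})
    print(info['allvecs'][:3])  # solution vector at the first three iterations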
scipy/optimize/_minimize.py
@@ -29,7 +29,7 @@
def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
hessp=None, bounds=None, constraints=(),
- options=dict(), callback=None, retall=False):
+ options=dict(), callback=None):
"""
Minimization of scalar function of one or more variables.
@@ -94,9 +94,6 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
- retall : bool, optional
- If True, return a list of the solution at each iteration. This is only
- done if `full_output` is True.
Returns
-------
@@ -131,7 +128,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
accept : int
Number of tests accepted.
allvecs : list
- Solution at each iteration (if ``retall == True``).
+ Solution at each iteration (if ``options['return_all'] == True``).
See also
--------
@@ -320,10 +317,10 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
callback is not None:
warn('Method %s does not support callback.' % method,
RuntimeWarning)
- # - retall
+ # - return_all
if meth in ['anneal', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and \
- retall:
- warn('Method %s does not support retall.' % method,
+ options.get('return_all', False):
+ warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# fun also returns the jacobian
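
The new check mirrors the existing callback warning just above it, so a
`return_all` request to a solver that cannot honour it is reported rather
than silently dropped. A small sketch of the resulting behaviour (method
name taken from the list in the hunk above):

    import warnings
    import numpy as np
    from scipy.optimize import minimize, rosen

    x0 = np.array([1.3, 0.7])
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # 'cobyla' is among the methods that ignore the iterate history,
        # so this emits a RuntimeWarning instead of failing.
        minimize(rosen, x0, method='COBYLA', options={'return_all': True})
    assert any('return_all' in str(w.message) for w in caught)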
@@ -335,18 +332,16 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
jac = None
if meth == 'nelder-mead':
- return _minimize_neldermead(fun, x0, args, options, retall,
- callback)
+ return _minimize_neldermead(fun, x0, args, options, callback)
elif meth == 'powell':
- return _minimize_powell(fun, x0, args, options, retall, callback)
+ return _minimize_powell(fun, x0, args, options, callback)
elif meth == 'cg':
- return _minimize_cg(fun, x0, args, jac, options, retall, callback)
+ return _minimize_cg(fun, x0, args, jac, options, callback)
elif meth == 'bfgs':
- return _minimize_bfgs(fun, x0, args, jac, options, retall,
- callback)
+ return _minimize_bfgs(fun, x0, args, jac, options, callback)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, options,
- retall, callback)
+ callback)
elif meth == 'anneal':
return _minimize_anneal(fun, x0, args, options)
elif meth == 'l-bfgs-b':
@@ -505,6 +500,9 @@ def show_minimize_options(method=None):
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Nelder-Mead options:
xtol : float
@@ -513,13 +511,22 @@ def show_minimize_options(method=None):
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Newton-CG options:
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* CG options:
gtol : float
@@ -529,6 +536,9 @@ def show_minimize_options(method=None):
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Powell options:
xtol : float
@@ -539,6 +549,9 @@ def show_minimize_options(method=None):
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Anneal options:
schedule : str
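
These documentation blocks belong to `show_minimize_options`, which is now
the place to discover `return_all` since it no longer appears in the
`minimize` signature. Something along these lines should print the relevant
block (the import path is an assumption based on where this commit defines
the function):

    from scipy.optimize._minimize import show_minimize_options

    # Method names match the lower-case strings used in the dispatch code.
    show_minimize_options('nelder-mead')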
scipy/optimize/optimize.py
@@ -293,10 +293,10 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
- 'disp': disp}
+ 'disp': disp,
+ 'return_all': retall}
- x, info = _minimize_neldermead(func, x0, args, opts, retall=retall,
- callback=callback)
+ x, info = _minimize_neldermead(func, x0, args, opts, callback=callback)
if full_output:
retlist = x, info['fun'], info['nit'], info['nfev'], info['status']
if retall:
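
Backward compatibility is preserved: `fmin` keeps its `retall` keyword and
merely forwards it as `opts['return_all']`, so legacy code behaves as
before. For instance:

    import numpy as np
    from scipy.optimize import fmin, rosen

    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    # Without full_output, retall=True makes fmin return (xopt, allvecs),
    # exactly as it did before this commit.
    xopt, allvecs = fmin(rosen, x0, retall=True, disp=False)
    print(len(allvecs), 'iterates recorded')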
@@ -308,8 +308,7 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
else:
return x
-def _minimize_neldermead(func, x0, args=(), options={}, retall=0,
- callback=None):
+def _minimize_neldermead(func, x0, args=(), options={}, callback=None):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
@@ -335,6 +334,7 @@ def _minimize_neldermead(func, x0, args=(), options={}, retall=0,
maxiter = options.get('maxiter')
maxfun = options.get('maxfev')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
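
Each backend now reads the flag itself via `options.get('return_all', False)`,
matching the default of the removed `retall=0` argument. A schematic of the
shared pattern, using a hypothetical `_minimize_toy` that is not part of
scipy:

    import numpy as np

    def _minimize_toy(fun, x0, args=(), options={}, callback=None):
        # Same extraction idiom as _minimize_neldermead above; callers
        # that never mention 'return_all' get the old default behaviour.
        retall = options.get('return_all', False)
        x = np.asarray(x0, dtype=float).flatten()
        allvecs = [x.copy()]
        for _ in range(options.get('maxiter', 5)):
            x = 0.9 * x                    # stand-in for a real update step
            if callback is not None:
                callback(x)
            allvecs.append(x.copy())
        info = {'fun': fun(x, *args)}
        if retall:
            info['allvecs'] = allvecs      # only exposed when requested
        return x, info

    # e.g. _minimize_toy(lambda v: (v ** 2).sum(), [1.0, 2.0],
    #                    options={'return_all': True})[1]['allvecs']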
@@ -658,10 +658,10 @@ def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
'norm': norm,
'eps': epsilon,
'disp': disp,
- 'maxiter': maxiter}
+ 'maxiter': maxiter,
+ 'return_all': retall}
- x, info = _minimize_bfgs(f, x0, args, fprime, opts, retall=retall,
- callback=callback)
+ x, info = _minimize_bfgs(f, x0, args, fprime, opts, callback=callback)
if full_output:
retlist = x, info['fun'], info['jac'], info['hess'], \
@@ -675,8 +675,7 @@ def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
else:
return x
-def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, retall=0,
- callback=None):
+def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, callback=None):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
@@ -705,6 +704,7 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, retall=0,
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
if x0.ndim == 0:
@@ -897,10 +897,10 @@ def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
'norm': norm,
'eps': epsilon,
'disp': disp,
- 'maxiter': maxiter}
+ 'maxiter': maxiter,
+ 'return_all': retall}
- x, info = _minimize_cg(f, x0, args, fprime, opts, retall=retall,
- callback=callback)
+ x, info = _minimize_cg(f, x0, args, fprime, opts, callback=callback)
if full_output:
retlist = x, info['fun'], info['nfev'], info['njev'], info['status']
@@ -913,8 +913,7 @@ def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
else:
return x
-def _minimize_cg(fun, x0, args=(), jac=None, options={}, retall=0,
- callback=None):
+def _minimize_cg(fun, x0, args=(), jac=None, options={}, callback=None):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
@@ -943,6 +942,7 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, retall=0,
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
if maxiter is None:
@@ -1135,10 +1135,11 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
opts = {'xtol': avextol,
'epsilon': epsilon,
'maxiter': maxiter,
- 'disp': disp}
+ 'disp': disp,
+ 'return_all': retall}
x, info = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
- retall=retall, callback=callback)
+ callback=callback)
if full_output:
retlist = x, info['fun'], info['nfev'], info['njev'], \
@@ -1153,7 +1154,7 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
return x
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
- options={}, retall=0, callback=None):
+ options={}, callback=None):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
@@ -1185,6 +1186,7 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
@@ -1986,10 +1988,10 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
- 'direc': direc}
+ 'direc': direc,
+ 'return_all': retall}
- x, info = _minimize_powell(func, x0, args, opts, retall=retall,
- callback=callback)
+ x, info = _minimize_powell(func, x0, args, opts, callback=callback)
if full_output:
retlist = x, info['fun'], info['direc'], info['nit'], \
@@ -2003,8 +2005,7 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
else:
return x
-def _minimize_powell(func, x0, args=(), options={}, retall=0,
- callback=None):
+def _minimize_powell(func, x0, args=(), options={}, callback=None):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
@@ -2033,6 +2034,7 @@ def _minimize_powell(func, x0, args=(), options={}, retall=0,
maxfun = options.get('maxfev')
disp = options.get('disp', False)
direc = options.get('direc')
+ retall = options.get('return_all', False)
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
scipy/optimize/tests/test_optimize.py
@@ -67,11 +67,11 @@ def hessp(self, x, p):
def test_cg(self, use_wrapper=False):
""" conjugate gradient optimization routine """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='CG',
- jac=self.grad, options=opts,
- retall=False)
+ jac=self.grad, options=opts)
fopt, func_calls, grad_calls, warnflag = \
info['fun'], info['nfev'], info['njev'], info['status']
@@ -100,11 +100,11 @@ def test_cg(self, use_wrapper=False):
def test_bfgs(self, use_wrapper=False):
""" Broyden-Fletcher-Goldfarb-Shanno optimization routine """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
params, info = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS',
- args=(), options=opts,
- retall=False)
+ args=(), options=opts)
fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
info['fun'], info['jac'], info['hess'], info['nfev'], \
@@ -176,11 +176,11 @@ def test_powell(self, use_wrapper=False):
""" Powell (direction set) optimization routine
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Powell',
- options=opts,
- retall=False)
+ options=opts)
fopt, direc, numiter, func_calls, warnflag = \
info['fun'], info['direc'], info['nit'], info['nfev'], \
info['status']
@@ -219,11 +219,11 @@ def test_neldermead(self, use_wrapper=False):
""" Nelder-Mead simplex algorithm
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Nelder-mead',
- options=opts,
- retall=False)
+ options=opts)
fopt, numiter, func_calls, warnflag = \
info['fun'], info['nit'], info['nfev'], info['status']
else:
@@ -251,11 +251,11 @@ def test_ncg(self, use_wrapper=False):
""" line-search Newton conjugate gradient optimization routine
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
- args=(), options=opts,
- retall=False)[0]
+ args=(), options=opts)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
@@ -283,12 +283,12 @@ def test_ncg(self, use_wrapper=False):
def test_ncg_hess(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hess,
- args=(), options=opts,
- retall=False)[0]
+ args=(), options=opts)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
@@ -317,12 +317,12 @@ def test_ncg_hess(self, use_wrapper=False):
def test_ncg_hessp(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian times a vector p """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp = self.hessp,
- args=(), options=opts,
- retall=False)[0]
+ args=(), options=opts)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
