Merge pull request #196 from dlaxalde:enh/optimize/simplify-minimize

  DOC: update optimize tutorial w.r.t. changes in minimize/minimize_scalar signature
  DOC: update minimize and minimize_scalar docstring and examples wrt new result format
  ENH: define and use Result to store results of optimization wrappers
  FIX: don't use {} as default value for options parameter in optimize
  ENH: move the retall parameter as an option in minimize
  ENH: drop full_output parameter from minimize_scalar
  ENH: drop full_output parameter from minimize
commit 98c46c92f53871480decac60a604961b61c0a7d7 (2 parents: de27171 + b480617)
Authored by @dlax
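In short, this merge makes minimize and minimize_scalar always return a single Result object instead of switching between a bare solution and an (xopt, info) tuple via the removed full_output/retall keywords. A hedged before/after sketch of the calling convention, assuming a SciPy build that includes these changes:

    from scipy.optimize import minimize, rosen, rosen_der

    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]

    # Old interface (removed by this merge): full_output=True returned an
    # (xopt, info) tuple, otherwise only xopt was returned.
    # xopt, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
    #                       full_output=True)

    # New interface: a Result object is always returned.
    res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
                   options={'gtol': 1e-6, 'disp': False})
    print(res.x)        # solution array
    print(res.success)  # True if the optimizer exited successfully
    print(res.message)  # human-readable termination reason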
doc/source/tutorial/optimize.rst (46 changes)
@@ -70,14 +70,14 @@ parameter):
... return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)
>>> x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
- >>> xopt = minimize(rosen, x0, method='nelder-mead',
- ... options={'xtol': 1e-8, 'disp': True})
+ >>> res = minimize(rosen, x0, method='nelder-mead',
+ ... options={'xtol': 1e-8, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 339
Function evaluations: 571
- >>> print xopt
+ >>> print res.x
[ 1. 1. 1. 1. 1.]
The simplex algorithm is probably the simplest way to minimize a fairly
@@ -133,14 +133,14 @@ This gradient information is specified in the :func:`minimize` function
through the ``jac`` parameter as illustrated below.
- >>> xopt = minimize(rosen, x0, method='BFGS', jac=rosen_der,
- ... options={'disp': True})
+ >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
+ ... options={'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 51
Function evaluations: 63
Gradient evaluations: 63
- >>> print xopt
+ >>> print res.x
[ 1. 1. 1. 1. 1.]
@@ -217,15 +217,16 @@ the function using Newton-CG method is shown in the following example:
... H = H + np.diag(diagonal)
... return H
- >>> xopt = minimize(rosen, x0, method='Newton-CG', jac=rosen_der, hess=rosen_hess,
- ... options={'avextol': 1e-8, 'disp': True})
+ >>> res = minimize(rosen, x0, method='Newton-CG',
+ ... jac=rosen_der, hess=rosen_hess,
+ ... options={'avextol': 1e-8, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 19
Function evaluations: 22
Gradient evaluations: 19
Hessian evaluations: 19
- >>> print xopt
+ >>> print res.x
[ 1. 1. 1. 1. 1.]
@@ -264,15 +265,16 @@ Rosenbrock function using :func:`minimize` follows:
... Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1]
... return Hp
- >>> xopt = minimize(rosen, x0, method='Newton-CG', jac=rosen_der, hess=rosen_hess_p,
- ... options={'avextol': 1e-8, 'disp': True})
+ >>> res = minimize(rosen, x0, method='Newton-CG',
+ ... jac=rosen_der, hess=rosen_hess_p,
+ ... options={'avextol': 1e-8, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 20
Function evaluations: 23
Gradient evaluations: 20
Hessian evaluations: 44
- >>> print xopt
+ >>> print res.x
[ 1. 1. 1. 1. 1.]
@@ -341,26 +343,26 @@ Then constraints are defined as a sequence of dictionaries, with keys
Now an unconstrained optimization can be performed as:
- >>> xopt = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv,
- ... method='SLSQP', options={'disp': True})
+ >>> res = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv,
+ ... method='SLSQP', options={'disp': True})
Optimization terminated successfully. (Exit mode 0)
Current function value: -2.0
Iterations: 4
Function evaluations: 5
Gradient evaluations: 4
- >>> print xopt
+ >>> print res.x
[ 2. 1.]
and a constrained optimization as:
- >>> xopt = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv,
- constraints=cons, method='SLSQP', options={'disp': True})
+ >>> res = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv,
+ ... constraints=cons, method='SLSQP', options={'disp': True})
Optimization terminated successfully. (Exit mode 0)
Current function value: -1.00000018311
Iterations: 9
Function evaluations: 14
Gradient evaluations: 9
- >>> print xopt
+ >>> print res.x
[ 1.00000009 1. ]
@@ -492,8 +494,8 @@ Here is an example:
>>> from scipy.optimize import minimize_scalar
>>> f = lambda x: (x - 2) * (x + 1)**2
- >>> xmin = minimize_scalar(f, method='brent')
- >>> print xmin
+ >>> res = minimize_scalar(f, method='brent')
+ >>> print res.x
1.0
@@ -513,8 +515,8 @@ For example, to find the minimum of :math:`J_{1}\left( x \right)` near
:math:`x_{\textrm{min}}=5.3314` :
>>> from scipy.special import j1
- >>> xmin = minimize_scalar(j1, bs=(4, 7), method='bounded')
- >>> print xmin
+ >>> res = minimize_scalar(j1, bs=(4, 7), method='bounded')
+ >>> print res.x
5.33144184241
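The same convention applies to the scalar minimizer updated above. A short sketch using the tutorial's f, with an illustrative bounded call (the bounds values here are assumptions, not taken from the tutorial):

    from scipy.optimize import minimize_scalar

    f = lambda x: (x - 2) * (x + 1)**2

    # Brent method: the tutorial above reports res.x == 1.0.
    res = minimize_scalar(f, method='brent')
    print(res.x)    # 1.0
    print(res.fun)  # objective value at the minimum

    # The 'bounded' method requires the bounds keyword; omitting it raises
    # ValueError (see _minimize.py below).
    res = minimize_scalar(f, bounds=(0, 3), method='bounded')
    print(res.x)    # should again be close to 1 for this f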
scipy/optimize/_minimize.py (178 changes)
@@ -29,8 +29,7 @@
def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
hessp=None, bounds=None, constraints=(),
- options=dict(), full_output=False, callback=None,
- retall=False):
+ options=None, callback=None):
"""
Minimization of scalar function of one or more variables.
@@ -101,49 +100,19 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
disp : bool
Set to True to print convergence messages.
For method-specific options, see `show_minimize_options`.
- full_output : bool, optional
- If True, return optional outputs. Default is False.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
- retall : bool, optional
- If True, return a list of the solution at each iteration. This is only
- done if `full_output` is True.
Returns
-------
- xopt : ndarray
- The solution.
- info : dict
- A dictionary of optional outputs (depending on the chosen method)
- with the keys:
- solution : ndarray
- The solution (same as `xopt`).
- success : bool
- Boolean flag indicating if a solution was found.
- status : int
- An integer flag indicating the type of termination. Its
- value depends on the underlying solver. Refer to `message`
- for more information.
- message : str
- A string message giving information about the cause of the
- termination.
- fun, jac, hess : ndarray
- Values of objective function, Jacobian and Hessian (if
- available).
- nfev, njev, nhev: int
- Number of evaluations of the objective functions and of its
- jacobian and hessian.
- nit: int
- Number of iterations.
- direc: ndarray
- Current set of direction vectors for the Powell method.
- T : float
- Final temperature for simulated annealing.
- accept : int
- Number of tests accepted.
- allvecs : list
- Solution at each iteration (if ``retall == True``).
+ res : Result
+ The optimization result represented as a ``Result`` object.
+ Important attributes are: ``x`` the solution array, ``success`` a
+ Boolean flag indicating if the optimizer exited successfully and
+ ``message`` which describes the cause of the termination. See
+ `Result` for a description of other attributes.
+
See also
--------
@@ -258,26 +227,25 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
- >>> xopt = minimize(rosen, x0, method='Nelder-Mead')
- Optimization terminated successfully.
- Current function value: 0.000066
- Iterations: 141
- Function evaluations: 243
- >>> print xopt
+ >>> res = minimize(rosen, x0, method='Nelder-Mead')
+ >>> res.x
[ 1. 1. 1. 1. 1.]
Now using the *BFGS* algorithm, using the first derivative and a few
options:
- >>> xopt, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
- ... options={'gtol': 1e-6, 'disp': False},
- ... full_output=True)
-
- >>> print info['message']
+ >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
+ ... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
- >>> print info['solution']
+ Current function value: 0.000000
+ Iterations: 52
+ Function evaluations: 64
+ Gradient evaluations: 64
+ >>> res.x
[ 1. 1. 1. 1. 1.]
- >>> print info['hess']
+ >>> print res.message
+ Optimization terminated successfully.
+ >>> res.hess
[[ 0.00749589 0.01255155 0.02396251 0.04750988 0.09495377]
[ 0.01255155 0.02510441 0.04794055 0.09502834 0.18996269]
[ 0.02396251 0.04794055 0.09631614 0.19092151 0.38165151]
@@ -302,13 +270,15 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
The optimization problem is solved using the SLSQP method as:
- >>> xopt, info = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
- ... constraints=cons, full_output=True)
+ >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
+ ... constraints=cons)
It should converge to the theoretical solution (1.4 ,1.7).
"""
meth = method.lower()
+ if options is None:
+ options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ['nelder-mead', 'powell', 'anneal', 'cobyla'] and bool(jac):
@@ -334,10 +304,10 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
callback is not None:
warn('Method %s does not support callback.' % method,
RuntimeWarning)
- # - retall
+ # - return_all
if meth in ['anneal', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and \
- retall:
- warn('Method %s does not support retall.' % method,
+ options.get('return_all', False):
+ warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# fun also returns the jacobian
@@ -349,40 +319,33 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
jac = None
if meth == 'nelder-mead':
- return _minimize_neldermead(fun, x0, args, options, full_output,
- retall, callback)
+ return _minimize_neldermead(fun, x0, args, options, callback)
elif meth == 'powell':
- return _minimize_powell(fun, x0, args, options, full_output,
- retall, callback)
+ return _minimize_powell(fun, x0, args, options, callback)
elif meth == 'cg':
- return _minimize_cg(fun, x0, args, jac, options, full_output,
- retall, callback)
+ return _minimize_cg(fun, x0, args, jac, options, callback)
elif meth == 'bfgs':
- return _minimize_bfgs(fun, x0, args, jac, options, full_output,
- retall, callback)
+ return _minimize_bfgs(fun, x0, args, jac, options, callback)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, options,
- full_output, retall, callback)
+ callback)
elif meth == 'anneal':
- return _minimize_anneal(fun, x0, args, options, full_output)
+ return _minimize_anneal(fun, x0, args, options)
elif meth == 'l-bfgs-b':
- return _minimize_lbfgsb(fun, x0, args, jac, bounds, options,
- full_output)
+ return _minimize_lbfgsb(fun, x0, args, jac, bounds, options)
elif meth == 'tnc':
- return _minimize_tnc(fun, x0, args, jac, bounds, options,
- full_output)
+ return _minimize_tnc(fun, x0, args, jac, bounds, options)
elif meth == 'cobyla':
- return _minimize_cobyla(fun, x0, args, constraints, options,
- full_output)
+ return _minimize_cobyla(fun, x0, args, constraints, options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
- constraints, options, full_output)
+ constraints, options)
else:
raise ValueError('Unknown solver %s' % method)
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
- method='brent', options=dict(), full_output=False):
+ method='brent', options=None):
"""
Minimization of scalar function of one variable.
@@ -423,31 +386,15 @@ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
- full_output : bool, optional
- If True, return optional outputs. Default is False.
Returns
-------
- xopt : ndarray
- The solution.
- info : dict
- A dictionary of optional outputs (depending on the chosen method)
- with the keys:
- success : bool
- Boolean flag indicating if a solution was found.
- status : int
- An integer flag indicating the type of termination. Its
- value depends on the underlying solver. Refer to `message`
- for more information.
- message : str
- A string message giving information about the cause of the
- termination.
- fun : float
- Values of objective function.
- nfev: int
- Number of evaluations of the objective function.
- nit: int
- Number of iterations.
+ res : Result
+ The optimization result represented as a ``Result`` object.
+ Important attributes are: ``x`` the solution array, ``success`` a
+ Boolean flag indicating if the optimizer exited successfully and
+ ``message`` which describes the cause of the termination. See
+ `Result` for a description of other attributes.
See also
--------
@@ -480,32 +427,31 @@ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
Using the *Brent* method, we find the local minimum as:
>>> from scipy.optimize import minimize_scalar
- >>> xl = minimize_scalar(f)
- >>> xl
+ >>> res = minimize_scalar(f)
+ >>> res.x
1.28077640403
Using the *Bounded* method, we find a local minimum with specified
bounds as:
- >>> xc = minimize_scalar(f, bounds=(-3, -1), method='bounded')
- >>> xc
+ >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
+ >>> res.x
-2.0000002026
"""
meth = method.lower()
+ if options is None:
+ options = {}
if meth == 'brent':
- return _minimize_scalar_brent(fun, bracket, args, options,
- full_output)
+ return _minimize_scalar_brent(fun, bracket, args, options)
elif meth == 'bounded':
if bounds is None:
raise ValueError('The `bounds` parameter is mandatory for '
'method `bounded`.')
- return _minimize_scalar_bounded(fun, bounds, args, options,
- full_output)
+ return _minimize_scalar_bounded(fun, bounds, args, options)
elif meth == 'golden':
- return _minimize_scalar_golden(fun, bracket, args, options,
- full_output)
+ return _minimize_scalar_golden(fun, bracket, args, options)
else:
raise ValueError('Unknown solver %s' % method)
@@ -534,6 +480,9 @@ def show_minimize_options(method=None):
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Nelder-Mead options:
xtol : float
@@ -542,6 +491,9 @@ def show_minimize_options(method=None):
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Newton-CG options:
xtol : float
@@ -549,6 +501,12 @@ def show_minimize_options(method=None):
convergence.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* CG options:
gtol : float
@@ -558,6 +516,9 @@ def show_minimize_options(method=None):
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Powell options:
xtol : float
@@ -568,6 +529,9 @@ def show_minimize_options(method=None):
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
+ return_all : bool
+ If True, return a list of the solution at each iteration. This is only
+ done if `full_output` is True.
* Anneal options:
schedule : str
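Note that the retall keyword is gone from minimize: as the show_minimize_options additions above document, the iterate history is now requested through the return_all option for the solvers that support it. A hedged usage sketch:

    from scipy.optimize import minimize, rosen

    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
    res = minimize(rosen, x0, method='Nelder-Mead',
                   options={'xtol': 1e-8, 'return_all': True})

    # With return_all set, the per-iteration solutions are stored under the
    # 'allvecs' key of the Result (see _minimize_neldermead further below).
    print(len(res['allvecs']))  # number of recorded iterates
    print(res.x)                # final solution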
scipy/optimize/anneal.py (49 changes)
@@ -4,8 +4,8 @@
import numpy
from numpy import asarray, tan, exp, ones, squeeze, sign, \
- all, log, sqrt, pi, shape, array, minimum, where
-from numpy import random
+ all, log, sqrt, pi, shape, array, minimum, where, random
+from optimize import Result
__all__ = ['anneal']
@@ -303,17 +303,15 @@ def anneal(func, x0, args=(), schedule='fast', full_output=0,
'dwell' : dwell,
'disp' : disp}
- # call _minimize_anneal full_output=True in order to always retrieve
- # retval (aka info['status'])
- x, info = _minimize_anneal(func, x0, args, opts, full_output=True)
+ res = _minimize_anneal(func, x0, args, opts)
if full_output:
- return x, info['fun'], info['T'], info['nfev'], info['nit'], \
- info['accept'], info['status']
+ return res['x'], res['fun'], res['T'], res['nfev'], res['nit'], \
+ res['accept'], res['status']
else:
- return x, info['status']
+ return res['x'], res['status']
-def _minimize_anneal(func, x0, args=(), options={}, full_output=0):
+def _minimize_anneal(func, x0, args=(), options=None):
"""
Minimization of scalar function of one or more variables using the
simulated annealing algorithm.
@@ -352,6 +350,8 @@ def _minimize_anneal(func, x0, args=(), options={}, full_output=0):
This function is called by the `minimize` function with
`method=anneal`. It is not supposed to be called directly.
"""
+ if options is None:
+ options = {}
# retrieve useful options
schedule = options.get('schedule', 'fast')
T0 = options.get('T0')
@@ -449,25 +449,18 @@ def _minimize_anneal(func, x0, args=(), options={}, full_output=0):
retval = 4
break
- if full_output:
- info = {'solution': best_state.x,
- 'fun' : best_state.cost,
- 'T' : schedule.T,
- 'nfev' : schedule.feval,
- 'nit' : iters,
- 'accept' : schedule.accepted,
- 'status' : retval,
- 'success' : retval <= 1}
- info['message'] = {0: 'Points no longer changing',
- 1: 'Cooled to final temperature',
- 2: 'Maximum function evaluations',
- 3: 'Maximum cooling iterations reached',
- 4: 'Maximum accepted query locations reached',
- 5: 'Final point not the minimum amongst '
- 'encountered points'}[retval]
- return best_state.x, info
- else:
- return best_state.x
+ result = Result(x=best_state.x, fun=best_state.cost,
+ T=schedule.T, nfev=schedule.feval, nit=iters,
+ accept=schedule.accepted, status=retval,
+ success=(retval <= 1),
+ message={0: 'Points no longer changing',
+ 1: 'Cooled to final temperature',
+ 2: 'Maximum function evaluations',
+ 3: 'Maximum cooling iterations reached',
+ 4: 'Maximum accepted query locations reached',
+ 5: 'Final point not the minimum amongst '
+ 'encountered points'}[retval])
+ return result
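Because Result subclasses dict, the legacy anneal wrapper above rebuilds its historical return tuple simply by indexing the result, and user code migrating away from the removed (x, info) pairs can do the same. A small illustrative sketch (using BFGS and a hypothetical objective for brevity; the access pattern is what matters):

    from scipy.optimize import minimize

    def quadratic(x):
        return (x[0] - 1.0) ** 2

    res = minimize(quadratic, [0.0], method='BFGS')

    # Dict-style and attribute-style access are interchangeable on Result.
    x, status = res['x'], res['status']
    assert status == res.status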
scipy/optimize/cobyla.py (17 changes)
@@ -9,6 +9,7 @@
"""
from scipy.optimize import _cobyla
+from optimize import Result
from numpy import copy
from warnings import warn
@@ -159,11 +160,10 @@ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
'disp' : iprint != 0,
'maxfev': maxfun}
- return _minimize_cobyla(func, x0, args, constraints=con, options=opts,
- full_output=False)
+ return _minimize_cobyla(func, x0, args, constraints=con,
+ options=opts)['x']
-def _minimize_cobyla(fun, x0, args=(), constraints=(), options={},
- full_output=False):
+def _minimize_cobyla(fun, x0, args=(), constraints=(), options=None):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
@@ -183,6 +183,8 @@ def _minimize_cobyla(fun, x0, args=(), constraints=(), options={},
This function is called by the `minimize` function with
`method=COBYLA`. It is not supposed to be called directly.
"""
+ if options is None:
+ options = {}
# retrieve useful options
rhobeg = options.get('rhobeg', 1.0)
rhoend = options.get('rhoend', 1e-4)
@@ -232,12 +234,7 @@ def calcfc(x, con):
xopt = _cobyla.minimize(calcfc, m=m, x=copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun)
- if full_output:
- warn('COBYLA does not handle full_output parameter.',
- RuntimeWarning)
- return xopt, dict()
- else:
- return xopt
+ return Result(x=xopt)
if __name__ == '__main__':
scipy/optimize/lbfgsb.py (35 changes)
@@ -26,7 +26,7 @@
from numpy import array, asarray, float64, int32, zeros
import _lbfgsb
-from optimize import approx_fprime, MemoizeJac
+from optimize import approx_fprime, MemoizeJac, Result
from numpy.compat import asbytes
__all__ = ['fmin_l_bfgs_b']
@@ -160,18 +160,18 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
'eps' : epsilon,
'maxfev': maxfun}
- x, info = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
- options=opts, full_output=True)
- d = {'grad': info['jac'],
- 'task': info['message'],
- 'funcalls': info['nfev'],
- 'warnflag': info['status']}
- f = info['fun']
+ res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
+ options=opts)
+ d = {'grad': res['jac'],
+ 'task': res['message'],
+ 'funcalls': res['nfev'],
+ 'warnflag': res['status']}
+ f = res['fun']
+ x = res['x']
return x, f, d
-def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, options={},
- full_output=False):
+def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, options=None):
"""
Minimize a scalar function of one or more variables using the L-BFGS-B
algorithm.
@@ -205,6 +205,8 @@ def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, options={},
This function is called by the `minimize` function with
`method=L-BFGS-B`. It is not supposed to be called directly.
"""
+ if options is None:
+ options = {}
# retrieve useful options
disp = options.get('disp', None)
m = options.get('maxcor', 10)
@@ -303,17 +305,8 @@ def func_and_grad(x):
'warnflag' : warnflag
}
- if full_output:
- info = {'fun': f,
- 'jac': g,
- 'nfev': n_function_evals,
- 'status': warnflag,
- 'message': task_str,
- 'solution': x,
- 'success': warnflag==0}
- return x, info
- else:
- return x
+ return Result(fun=f, jac=g, nfev=n_function_evals, status=warnflag,
+ message=task_str, x=x, success=(warnflag==0))
if __name__ == '__main__':
scipy/optimize/optimize.py (379 changes)
@@ -18,7 +18,7 @@
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
- 'line_search', 'check_grad']
+ 'line_search', 'check_grad', 'Result']
__docformat__ = "restructuredtext en"
@@ -60,6 +60,53 @@ def derivative(self, x, *args):
self(x, *args)
return self.jac
+class Result(dict):
+ """ Represents the optimization result.
+
+ Attributes
+ ----------
+ x : ndarray
+ The solution of the optimization.
+ success : bool
+ Whether or not the optimizer exited successfully.
+ status : int
+ Termination status of the optimizer. Its value depends on the
+ underlying solver. Refer to `message` for details.
+ message : str
+ Description of the cause of the termination.
+ fun, jac, hess : ndarray
+ Values of objective function, Jacobian and Hessian (if available).
+ nfev, njev, nhev: int
+ Number of evaluations of the objective functions and of its
+ Jacobian and Hessian.
+ nit: int
+ Number of iterations performed by the optimizer.
+
+ Notes
+ -----
+ There may be additional attributes not listed above depending of the
+ specific solver. Since this class is essentially a subclass of dict
+ with attribute accessors, one can see which attributes are available
+ using the `keys()` method.
+ """
+ def __getattr__(self, name):
+ try:
+ return self[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ __setattr__ = dict.__setitem__
+ __delattr__ = dict.__delitem__
+
+ def __repr__(self):
+ if self.keys():
+ m = max(map(len, self.keys())) + 1
+ return '\n'.join([k.rjust(m) + ': ' + repr(v)
+ for k, v in self.iteritems()])
+ else:
+ return self.__class__.__name__ + "()"
+
+
# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core
def max(m, axis=0):
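The new Result type is a plain dict subclass whose keys double as attributes, so dict-based code keeps working while new code can use the attribute form. A minimal standalone sketch of that behavior, mirroring the definition above (no SciPy import needed):

    class Result(dict):
        """Dict subclass with attribute access, as defined above."""
        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)
        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__

    res = Result(x=[1.0, 1.0], success=True,
                 message='Optimization terminated successfully.')
    assert res.x == res['x']    # attribute and key access are equivalent
    res.nit = 42                # attribute assignment stores a new key
    print(sorted(res.keys()))   # ['message', 'nit', 'success', 'x']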
@@ -293,30 +340,22 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
- 'disp': disp}
+ 'disp': disp,
+ 'return_all': retall}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_neldermead(func, x0, args, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_neldermead(func, x0, args, opts, full_output,
- retall, callback)
+ res = _minimize_neldermead(func, x0, args, opts, callback=callback)
if full_output:
- x, info = out
- retlist = x, info['fun'], info['nit'], info['nfev'], info['status']
+ retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
- retlist += (info['allvecs'], )
+ retlist += (res['allvecs'], )
return retlist
else:
if retall:
- x, info = out
- return x, info['allvecs']
+ return res['x'], res['allvecs']
else:
- return out
+ return res['x']
-def _minimize_neldermead(func, x0, args=(), options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_neldermead(func, x0, args=(), options=None, callback=None):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
@@ -336,12 +375,15 @@ def _minimize_neldermead(func, x0, args=(), options={}, full_output=0,
This function is called by the `minimize` function with
`method=Nelder-Mead`. It is not supposed to be called directly.
"""
+ if options is None:
+ options = {}
# retrieve useful options
xtol = options.get('xtol', 1e-4)
ftol = options.get('ftol', 1e-4)
maxiter = options.get('maxiter')
maxfun = options.get('maxfev')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
@@ -472,19 +514,12 @@ def _minimize_neldermead(func, x0, args=(), options={}, full_output=0,
print " Function evaluations: %d" % fcalls[0]
- if full_output:
- info = {'fun': fval,
- 'nit': iterations,
- 'nfev': fcalls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': x}
- if retall:
- info['allvecs'] = allvecs
- return x, info
- else:
- return x
+ result = Result(fun=fval, nit=iterations, nfev=fcalls[0],
+ status=warnflag, success=(warnflag == 0), message=msg,
+ x=x)
+ if retall:
+ result['allvecs'] = allvecs
+ return result
def approx_fprime(xk, f, epsilon, *args):
@@ -668,32 +703,24 @@ def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
'norm': norm,
'eps': epsilon,
'disp': disp,
- 'maxiter': maxiter}
+ 'maxiter': maxiter,
+ 'return_all': retall}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_bfgs(f, x0, args, fprime, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_bfgs(f, x0, args, fprime, opts, full_output,
- retall, callback)
+ res = _minimize_bfgs(f, x0, args, fprime, opts, callback=callback)
if full_output:
- x, info = out
- retlist = x, info['fun'], info['jac'], info['hess'], \
- info['nfev'], info['njev'], info['status']
+ retlist = res['x'], res['fun'], res['jac'], res['hess'], \
+ res['nfev'], res['njev'], res['status']
if retall:
- retlist += (info['allvecs'], )
+ retlist += (res['allvecs'], )
return retlist
else:
if retall:
- x, info = out
- return x, info['allvecs']
+ return res['x'], res['allvecs']
else:
- return out
+ return res['x']
-def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_bfgs(fun, x0, args=(), jac=None, options=None, callback=None):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
@@ -716,12 +743,15 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
"""
f = fun
fprime = jac
+ if options is None:
+ options = {}
# retrieve useful options
gtol = options.get('gtol', 1e-5)
norm = options.get('norm', Inf)
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
if x0.ndim == 0:
@@ -799,8 +829,7 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * sk[:, numpy.newaxis] \
* sk[numpy.newaxis, :]
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
@@ -828,21 +857,12 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'hess': Hk,
- 'nfev': func_calls[0],
- 'njev': grad_calls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ result = Result(fun=fval, jac=gfk, hess=Hk, nfev=func_calls[0],
+ njev=grad_calls[0], status=warnflag,
+ success=(warnflag == 0), message=msg, x=xk)
+ if retall:
+ result['allvecs'] = allvecs
+ return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
@@ -918,30 +938,23 @@ def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
'norm': norm,
'eps': epsilon,
'disp': disp,
- 'maxiter': maxiter}
+ 'maxiter': maxiter,
+ 'return_all': retall}
+
+ res = _minimize_cg(f, x0, args, fprime, opts, callback=callback)
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_cg(f, x0, args, fprime, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_cg(f, x0, args, fprime, opts, full_output, retall,
- callback)
if full_output:
- x, info = out
- retlist = x, info['fun'], info['nfev'], info['njev'], info['status']
+ retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
- retlist += (info['allvecs'], )
+ retlist += (res['allvecs'], )
return retlist
else:
if retall:
- x, info = out
- return x, info['allvecs']
+ return res['x'], res['allvecs']
else:
- return out
+ return res['x']
-def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_cg(fun, x0, args=(), jac=None, options=None, callback=None):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
@@ -964,12 +977,15 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
"""
f = fun
fprime = jac
+ if options is None:
+ options = {}
# retrieve useful options
gtol = options.get('gtol', 1e-5)
norm = options.get('norm', Inf)
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
if maxiter is None:
@@ -1025,8 +1041,7 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
k += 1
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
@@ -1055,20 +1070,12 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
print " Gradient evaluations: %d" % grad_calls[0]
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'nfev': func_calls[0],
- 'njev': grad_calls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ result = Result(fun=fval, jac=gfk, nfev=func_calls[0],
+ njev=grad_calls[0], status=warnflag,
+ success=(warnflag == 0), message=msg, x=xk)
+ if retall:
+ result['allvecs'] = allvecs
+ return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
@@ -1166,33 +1173,26 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
opts = {'xtol': avextol,
'epsilon': epsilon,
'maxiter': maxiter,
- 'disp': disp}
+ 'disp': disp,
+ 'return_all': retall}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
- full_output=True, retall=retall,
- callback=callback)
- else:
- out = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
- full_output, retall, callback)
+ res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
+ callback=callback)
if full_output:
- x, info = out
- retlist = x, info['fun'], info['nfev'], info['njev'], \
- info['nhev'], info['status']
+ retlist = res['x'], res['fun'], res['nfev'], res['njev'], \
+ res['nhev'], res['status']
if retall:
- retlist += (info['allvecs'], )
+ retlist += (res['allvecs'], )
return retlist
else:
if retall:
- x, info = out
- return x, info['allvecs']
+ return res['x'], res['allvecs']
else:
- return out
+ return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
- options={}, full_output=0, retall=0, callback=None):
+ options=None, callback=None):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
@@ -1219,11 +1219,14 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
fprime = jac
fhess_p = hessp
fhess = hess
+ if options is None:
+ options = {}
# retrieve useful options
avextol = options.get('xtol', 1e-5)
epsilon = options.get('eps', _epsilon)
maxiter = options.get('maxiter')
disp = options.get('disp', False)
+ retall = options.get('return_all', False)
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
@@ -1297,8 +1300,7 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
allvecs.append(xk)
k += 1
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
@@ -1320,21 +1322,12 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
print " Gradient evaluations: %d" % gcalls[0]
print " Hessian evaluations: %d" % hcalls
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'nfev': fcalls[0],
- 'njev': gcalls[0],
- 'nhev': hcalls,
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ result = Result(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
+ nhev=hcalls, status=warnflag, success=(warnflag == 0),
+ message=msg, x=xk)
+ if retall:
+ result['allvecs'] = allvecs
+ return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
@@ -1392,16 +1385,15 @@ def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
'maxfev': maxfun,
'disp': disp}
- out = _minimize_scalar_bounded(func, (x1, x2), args, options,
- full_output)
+ res = _minimize_scalar_bounded(func, (x1, x2), args, options)
if full_output:
- x, info = out
- return x, info['fun'], info['status'], info['nfev']
+ return res['x'], res['fun'], res['status'], res['nfev']
else:
- return out
+ return res['x']
-def _minimize_scalar_bounded(func, bounds, args=(), options={},
- full_output=False):
+def _minimize_scalar_bounded(func, bounds, args=(), options=None):
+ if options is None:
+ options = {}
# retrieve options
xtol = options.get('xtol', 1e-5)
maxfun = options.get('maxfev', 500)
@@ -1517,18 +1509,13 @@ def _minimize_scalar_bounded(func, bounds, args=(), options={},
if disp > 0:
_endprint(x, flag, fval, maxfun, xtol, disp)
- if full_output:
- info = {'fun': fval,
- 'status': flag,
- 'success': flag == 0,
- 'message': {0: 'Solution found.',
- 1: 'Maximum number of function '
- 'calls reached.'}.get(flag, ''),
- 'nfev': num}
-
- return xf, info
- else:
- return xf
+ result = Result(fun=fval, status=flag, success=(flag == 0),
+ message={0: 'Solution found.',
+ 1: 'Maximum number of function calls '
+ 'reached.'}.get(flag, ''),
+ x=xf, nfev=num)
+
+ return result
class Brent:
#need to rethink design of __init__
@@ -1717,33 +1704,26 @@ def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
options = {'ftol': tol,
'maxiter': maxiter}
- out = _minimize_scalar_brent(func, brack, args, options, full_output)
+ res = _minimize_scalar_brent(func, brack, args, options)
if full_output:
- x, info = out
- return x, info['fun'], info['nit'], info['nfev']
+ return res['x'], res['fun'], res['nit'], res['nfev']
else:
- return out
+ return res['x']
-def _minimize_scalar_brent(func, brack=None, args=(), options={},
- full_output=False):
+def _minimize_scalar_brent(func, brack=None, args=(), options=None):
+ if options is None:
+ options = {}
# retrieve options
tol = options.get('ftol', 1.48e-8)
maxiter = options.get('maxiter', 500)
brent = Brent(func=func, args=args, tol=tol,
- full_output=full_output, maxiter=maxiter)
+ full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
- out = brent.get_result(full_output=full_output)
- if full_output:
- x, fval, nit, nfev = out
- info = {'fun': fval,
- 'nit': nit,
- 'nfev': nfev}
- return x, info
- else:
- return out
+ x, fval, nit, nfev = brent.get_result(full_output=True)
+ return Result(fun=fval, x=x, nit=nit, nfev=nfev)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
""" Given a function of one-variable and a possible bracketing interval,
@@ -1779,15 +1759,15 @@ def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
"""
options = {'ftol': tol}
- out = _minimize_scalar_golden(func, brack, args, options, full_output)
+ res = _minimize_scalar_golden(func, brack, args, options)
if full_output:
- x, info = out
- return x, info['fun'], info['nfev']
+ return res['x'], res['fun'], res['nfev']
else:
- return out
+ return res['x']
-def _minimize_scalar_golden(func, brack=None, args=(), options={},
- full_output=False):
+def _minimize_scalar_golden(func, brack=None, args=(), options=None):
+ if options is None:
+ options = {}
tol = options.get('ftol', _epsilon)
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
@@ -1835,11 +1815,8 @@ def _minimize_scalar_golden(func, brack=None, args=(), options={},
else:
xmin = x2
fval = f2
- if full_output:
- info = {'fun': fval, 'nfev': funcalls}
- return xmin, info
- else:
- return xmin
+
+ return Result(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
@@ -2045,31 +2022,24 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
- 'direc': direc}
+ 'direc': direc,
+ 'return_all': retall}
+
+ res = _minimize_powell(func, x0, args, opts, callback=callback)
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_powell(func, x0, args, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_powell(func, x0, args, opts, full_output, retall,
- callback)
if full_output:
- x, info = out
- retlist = x, info['fun'], info['direc'], info['nit'], \
- info['nfev'], info['status']
+ retlist = res['x'], res['fun'], res['direc'], res['nit'], \
+ res['nfev'], res['status']
if retall:
- retlist += (info['allvecs'], )
+ retlist += (res['allvecs'], )
return retlist
else:
if retall:
- x, info = out
- return x, info['allvecs']
+ return res['x'], res['allvecs']
else:
- return out
+ return res['x']
-def _minimize_powell(func, x0, args=(), options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_powell(func, x0, args=(), options=None, callback=None):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
@@ -2091,6 +2061,8 @@ def _minimize_powell(func, x0, args=(), options={}, full_output=0,
This function is called by the `minimize` function with
`method=Powell`. It is not supposed to be called directly.
"""
+ if options is None:
+ options = {}
# retrieve useful options
xtol = options.get('xtol', 1e-4)
ftol = options.get('ftol', 1e-4)
@@ -2098,6 +2070,7 @@ def _minimize_powell(func, x0, args=(), options={}, full_output=0,
maxfun = options.get('maxfev')
disp = options.get('disp', False)
direc = options.get('direc')
+ retall = options.get('return_all', False)
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
@@ -2182,20 +2155,12 @@ def _minimize_powell(func, x0, args=(), options={}, full_output=0,
x = squeeze(x)
- if full_output:
- info = {'fun': fval,
- 'direc': direc,
- 'nit': iter,
- 'nfev': fcalls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': x}
- if retall:
- info['allvecs'] = allvecs
- return x, info
- else:
- return x
+ result = Result(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
+ status=warnflag, success=(warnflag == 0), message=msg,
+ x=x)
+ if retall:
+ result['allvecs'] = allvecs
+ return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
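The options=dict() defaults removed throughout these files are the classic mutable-default pitfall targeted by the "FIX: don't use {} as default value for options parameter" commit: a default dict is created once at function definition time and shared by every call, so any mutation would leak state between invocations. A small hypothetical sketch of the pitfall and of the options=None pattern adopted in this diff:

    def bad(options={}):        # one dict, shared by every call
        options.setdefault('calls', 0)
        options['calls'] += 1
        return options['calls']

    def good(options=None):     # the pattern used in this diff
        if options is None:
            options = {}
        options.setdefault('calls', 0)
        options['calls'] += 1
        return options['calls']

    print(bad())   # 1
    print(bad())   # 2 -- the same dict was reused
    print(good())  # 1
    print(good())  # 1 -- a fresh dict each call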
scipy/optimize/slsqp.py (32 changes)
@@ -10,7 +10,7 @@
from scipy.optimize._slsqp import slsqp
from numpy import zeros, array, linalg, append, asfarray, concatenate, finfo, \
sqrt, vstack, exp, inf, where, isinf, atleast_1d
-from optimize import wrap_function
+from optimize import wrap_function, Result
__docformat__ = "restructuredtext en"
@@ -184,17 +184,15 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
'args': args}, )
- out = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
- constraints=cons, options=opts,
- full_output=full_output)
+ res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
+ constraints=cons, options=opts)
if full_output:
- x, info = out
- return x, info['fun'], info['nit'], info['status'], info['message']
+ return res['x'], res['fun'], res['nit'], res['status'], res['message']
else:
- return out
+ return res['x']
def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
- constraints=(), options={}, full_output=False):
+ constraints=(), options=None):
"""
Minimize a scalar function of one or more variables using Sequential
Least SQuares Programming (SLSQP).
@@ -212,6 +210,8 @@ def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
`method=SLSQP`. It is not supposed to be called directly.
"""
fprime = jac
+ if options is None:
+ options = {}
# retrieve useful options
iter = options.get('maxiter', 100)
acc = options.get('ftol', 1.0E-6)
@@ -404,19 +404,9 @@ def cjac(x, *args):
print " Function evaluations:", feval[0]
print " Gradient evaluations:", geval[0]
- if not full_output:
- return x
- else:
- info = {'solution': x,
- 'fun' : fx,
- 'jac' : g,
- 'nit' : int(majiter),
- 'nfev' : feval[0],
- 'njev' : geval[0],
- 'status' : int(mode),
- 'message' : exit_modes[int(mode)],
- 'success' : mode == 0}
- return x, info
+ return Result(x=x, fun=fx, jac=g, nit=int(majiter), nfev=feval[0],
+ njev=geval[0], status=int(mode),
+ message=exit_modes[int(mode)], success=(mode == 0))
if __name__ == '__main__':
scipy/optimize/tests/test_anneal.py (6 changes)
@@ -44,9 +44,9 @@ def anneal_schedule(self, schedule='fast', use_wrapper=False):
'maxiter' : self.maxiter,
'schedule': schedule,
'disp' : False}
- x, info = minimize(self.fun[n], self.x0[n], method='anneal',
- options=opts, full_output=True)
- retval = info['status']
+ res = minimize(self.fun[n], self.x0[n], method='anneal',
+ options=opts)
+ x, retval = res['x'], res['status']
else:
x, retval = anneal(self.fun[n], self.x0[n], full_output=False,
upper=self.upper[n], lower=self.lower[n],
scipy/optimize/tests/test_cobyla.py (2 changes)
@@ -31,7 +31,7 @@ def test_minimize_simple(self):
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
x = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
- options=self.opts)
+ options=self.opts).x
assert_allclose(x, self.solution, atol=1e-4)
if __name__ == "__main__":
scipy/optimize/tests/test_optimize.py (152 changes)
@@ -67,15 +67,14 @@ def hessp(self, x, p):
def test_cg(self, use_wrapper=False):
""" conjugate gradient optimization routine """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
- params, info = optimize.minimize(self.func, self.startparams,
- args=(), method='CG',
- jac=self.grad, options=opts,
- full_output=True,
- retall=False)
-
- fopt, func_calls, grad_calls, warnflag = \
- info['fun'], info['nfev'], info['njev'], info['status']
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
+ res = optimize.minimize(self.func, self.startparams, args=(),
+ method='CG', jac=self.grad,
+ options=opts)
+
+ params, fopt, func_calls, grad_calls, warnflag = \
+ res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
maxiter=self.maxiter,
@@ -101,16 +100,15 @@ def test_cg(self, use_wrapper=False):
def test_bfgs(self, use_wrapper=False):
""" Broyden-Fletcher-Goldfarb-Shanno optimization routine """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
- params, info = optimize.minimize(self.func, self.startparams,
- jac=self.grad, method='BFGS',
- args=(), options=opts,
- full_output=True,
- retall=False)
-
- fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
- info['fun'], info['jac'], info['hess'], info['nfev'], \
- info['njev'], info['status']
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
+ res = optimize.minimize(self.func, self.startparams,
+ jac=self.grad, method='BFGS', args=(),
+ options=opts)
+
+ params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
+ res['x'], res['fun'], res['jac'], res['hess'], \
+ res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
@@ -166,7 +164,7 @@ def test_bfgs_infinite(self, use_wrapper=False):
if use_wrapper:
opts = {'disp': False}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
- args=(), options=opts)
+ args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(not np.isfinite(func(x)))
@@ -178,15 +176,13 @@ def test_powell(self, use_wrapper=False):
""" Powell (direction set) optimization routine
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
- params, info = optimize.minimize(self.func, self.startparams,
- args=(), method='Powell',
- options=opts,
- full_output=True,
- retall=False)
- fopt, direc, numiter, func_calls, warnflag = \
- info['fun'], info['direc'], info['nit'], info['nfev'], \
- info['status']
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
+ res = optimize.minimize(self.func, self.startparams, args=(),
+ method='Powell', options=opts)
+ params, fopt, direc, numiter, func_calls, warnflag = \
+ res['x'], res['fun'], res['direc'], res['nit'], \
+ res['nfev'], res['status']
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
@@ -222,14 +218,13 @@ def test_neldermead(self, use_wrapper=False):
""" Nelder-Mead simplex algorithm
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
- params, info = optimize.minimize(self.func, self.startparams,
- args=(), method='Nelder-mead',
- options=opts,
- full_output=True,
- retall=False)
- fopt, numiter, func_calls, warnflag = \
- info['fun'], info['nit'], info['nfev'], info['status']
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
+ res = optimize.minimize(self.func, self.startparams, args=(),
+ method='Nelder-mead', options=opts)
+ params, fopt, numiter, func_calls, warnflag = \
+ res['x'], res['fun'], res['nit'], res['nfev'], \
+ res['status']
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
@@ -255,11 +250,11 @@ def test_ncg(self, use_wrapper=False):
""" line-search Newton conjugate gradient optimization routine
"""
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
- args=(), options=opts,
- full_output=False, retall=False)
+ args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
@@ -287,12 +282,12 @@ def test_ncg(self, use_wrapper=False):
def test_ncg_hess(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hess,
- args=(), options=opts,
- full_output=False, retall=False)
+ args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
@@ -321,12 +316,12 @@ def test_ncg_hess(self, use_wrapper=False):
def test_ncg_hessp(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian times a vector p """
if use_wrapper:
- opts = {'maxit': self.maxiter, 'disp': False}
+ opts = {'maxit': self.maxiter, 'disp': False,
+ 'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp = self.hessp,
- args=(), options=opts,
- full_output=False, retall=False)
+ args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
@@ -405,7 +400,7 @@ def test_minimize_l_bfgs_b(self):
opts = {'disp': False, 'maxiter': self.maxiter}
x = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
- options=opts)
+ options=opts)['x']
assert_allclose(self.func(x), self.func(self.solution),
atol=1e-6)
@@ -460,11 +455,10 @@ def test_l_bfgs_b_funjac(self):
def test_minimize_l_bfgs_b_bounds(self):
""" Minimize with method='L-BFGS-B' with bounds """
- x, info = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
- jac=self.jac, bounds=self.bounds,
- full_output=True)
- assert_(info['success'], info['message'])
- assert_allclose(x, self.solution, atol=1e-6)
+ res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
+ jac=self.jac, bounds=self.bounds)
+ assert_(res['success'], res['message'])
+ assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(TestCase):
"""Tests for scalar optimizers"""
@@ -525,45 +519,45 @@ def test_fminbound_scalar(self):
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
- x = optimize.minimize_scalar(self.fun)
+ x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
- x = optimize.minimize_scalar(self.fun, bracket = (-3, -2),
- args=(1.5, ), method='Brent')
+ x= optimize.minimize_scalar(self.fun, bracket = (-3, -2),
+ args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
- x = optimize.minimize_scalar(self.fun, method='Brent',
- args=(1.5, ), full_output=True)[0]
+ x= optimize.minimize_scalar(self.fun, method='Brent',
+ args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
- x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
- args=(1.5, ), method='Brent')
+ x= optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
+ args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket = (-3, -2),
- args=(1.5, ), method='golden')
+ args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
- args=(1.5, ), full_output=True)[0]
+ args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
- args=(1.5, ), method='golden')
+ args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
- method='Bounded')
+ method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
- x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
- method='bounded')
+ x= optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
+ method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
- x = optimize.minimize_scalar(self.fun,
- bounds=(np.array([1]), np.array([5])),
- args=(np.array([1.5]), ),
- method='bounded')
+ x= optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
+ np.array([5])),
+ args=(np.array([1.5]), ),
+ method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
@@ -573,7 +567,7 @@ def test_minimize_scalar(self):
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
- method='bounded')
+ method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
class TestTnc(TestCase):
@@ -688,7 +682,7 @@ def test_minimize_tnc1(self):
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc1b(self):
@@ -696,7 +690,7 @@ def test_minimize_tnc1b(self):
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.f1, x0, method='TNC',
- bounds=bnds, options=self.opts)
+ bounds=bnds, options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)
def test_minimize_tnc1c(self):
@@ -705,7 +699,7 @@ def test_minimize_tnc1c(self):
xopt = [1, 1]
x = optimize.minimize(self.fg1, x0, method='TNC',
jac=True, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc2(self):
@@ -714,7 +708,7 @@ def test_minimize_tnc2(self):
xopt = [-1.2210262419616387, 1.5]
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc3(self):
@@ -723,7 +717,7 @@ def test_minimize_tnc3(self):
xopt = [0, 0]
x = optimize.minimize(self.f3, x0, method='TNC',
jac=self.g3, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)
def test_minimize_tnc4(self):
@@ -732,7 +726,7 @@ def test_minimize_tnc4(self):
xopt = [1, 0]
x = optimize.minimize(self.f4, x0, method='TNC',
jac=self.g4, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)
def test_minimize_tnc5(self):
@@ -741,7 +735,7 @@ def test_minimize_tnc5(self):
xopt = [-0.54719755119659763, -1.5471975511965976]
x = optimize.minimize(self.f5, x0, method='TNC',
jac=self.g5, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)
def test_minimize_tnc38(self):
@@ -750,7 +744,7 @@ def test_minimize_tnc38(self):
xopt = [1]*4
x = optimize.minimize(self.f38, x0, method='TNC',
jac=self.g38, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)
def test_minimize_tnc45(self):
@@ -759,7 +753,7 @@ def test_minimize_tnc45(self):
xopt = [1, 2, 3, 4, 5]
x = optimize.minimize(self.f45, x0, method='TNC',
jac=self.g45, bounds=bnds,
- options=self.opts)
+ options=self.opts).x
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)
# fmin_tnc
scipy/optimize/tests/test_slsqp.py (144 changes)
@@ -85,126 +85,122 @@ def fprime_ieqcon2(self, x):
# minimize
def test_minimize_unbounded_approximated(self):
""" Minimize, method='SLSQP': unbounded, approximated jacobian. """
- x, info = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
- method='SLSQP', options=self.opts,
- full_output=True)
- assert_(info['success'], info['message'])
- assert_allclose(x, [2, 1])
+ res = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
+ method='SLSQP', options=self.opts)
+ assert_(res['success'], res['message'])
+ assert_allclose(res.x, [2, 1])
def test_minimize_unbounded_given(self):
""" Minimize, method='SLSQP': unbounded, given jacobian. """
- x, info = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
- jac=self.jac, method='SLSQP', options=self.opts,
- full_output=True)
- assert_(info['success'], info['message'])
- assert_allclose(x, [2, 1])
+ res = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
+ jac=self.jac, method='SLSQP', options=self.opts)
+ assert_(res['success'], res['message'])
+ assert_allclose(res.x, [2, 1])
def test_minimize_unbounded_combined(self):
""" \
Minimize, method='SLSQP': unbounded, combined function and jacobian.
"""
- x, info = minimize(self.fun_and_jac, [-1.0, 1.0], args = (-1.0, ),
- jac=True, method='SLSQP',
- options=self.opts, full_output=True)
- assert_(info['success'], info['message'])
- assert_allclose(x, [2, 1])
+ res = minimize(self.fun_and_jac, [-1.0, 1.0], args = (-1.0, ),
+ jac=True, method='SLSQP', options=self.opts)
+ assert_(res['success'], res['message'])
+ assert_allclose(res.x, [2, 1])
def test_minimize_equality_approximated(self):
""" \
Minimize with method='SLSQP': equality constraint, approx. jacobian.
"""
- x, info = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
- constraints={'type': 'eq',
- 'fun': self.f_eqcon,
- 'args': (-1.0, )},
- method='SLSQP', options=self.opts,
- full_output=True)
- assert_(info['success'], info['message'])
- assert_allclose(x, [1, 1])
+ res = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
+ constraints={'type': 'eq',
+ 'fun': self.f_eqcon,
+ 'args': (-1.0, )},
+ method='SLSQP', options=self.opts)
+ assert_(res['success'], res['message'])
+ assert_allclose(res.x, [1, 1])
def test_minimize_equality_given(self):
""" \
Minimize with method='SLSQP': equality constraint, given jacobian.
"""
- x, info = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
- method='SLSQP', args=(-1.0,),
- constraints={'type': 'eq', 'fun':self.f_eqcon,
-