# scipy/scipy

ENH: optimize: add a convenience tol= argument to minimize*()

Commit a45382134b96207c5fcf0e297413d55f25e9bf82 (1 parent: 535b17c), committed by pv on May 19, 2012.

72 additions and 30 deletions across 4 changed files:
1. +26 −8 scipy/optimize/_minimize.py
2. +24 −22 scipy/optimize/optimize.py
3. +2 −0 scipy/optimize/slsqp.py
4. +20 −0 scipy/optimize/tests/test_optimize.py
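
In short, `minimize()` and `minimize_scalar()` gain a single `tol=` keyword that seeds the solver-specific tolerance options. A minimal usage sketch (the objective below mirrors the one used in the new test and is illustrative only):

```python
from scipy.optimize import minimize, minimize_scalar

def f(z):
    x, y = z
    return x**2 * y**2 + x**4 + 1  # same objective as the new test

# One tol= value is enough; minimize() maps it onto the solver's own
# xtol/ftol/gtol/tol options, depending on the method.
res = minimize(f, [1.0, 1.0], method='BFGS', tol=1e-10)

# minimize_scalar() gets the same convenience argument (mapped to xtol).
res_s = minimize_scalar(lambda x: (x - 2)**2, tol=1e-10)
```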
**scipy/optimize/_minimize.py**

```diff
@@ -28,8 +28,8 @@ from slsqp import _minimize_slsqp
 
 def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
-             hessp=None, bounds=None, constraints=(),
-             options=None, callback=None):
+             hessp=None, bounds=None, constraints=(), tol=None,
+             callback=None, options=None):
     """
     Minimization of scalar function of one or more variables.
 
@@ -92,6 +92,9 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
         Equality constraint means that the constraint function result is to
         be zero whereas inequality means that it is to be non-negative.
         Note that COBYLA only supports inequality constraints.
+    tol : float, optional
+        Tolerance for termination. For detailed control, use solver-specific
+        options.
     options : dict, optional
         A dictionary of solver options. All methods accept the following
         generic options:
@@ -318,6 +321,19 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
     else:
         jac = None
 
+    # set default tolerances
+    if tol is not None:
+        options = dict(options)
+        if meth in ['nelder-mead', 'newton-cg', 'powell', 'tnc']:
+            options.setdefault('xtol', tol)
+        if meth in ['nelder-mead', 'powell', 'anneal', 'l-bfgs-b', 'tnc',
+                    'slsqp']:
+            options.setdefault('ftol', tol)
+        if meth in ['bfgs', 'cg', 'l-bfgs-b', 'tnc']:
+            options.setdefault('gtol', tol)
+        if meth in ['cobyla']:
+            options.setdefault('tol', tol)
+
     if meth == 'nelder-mead':
         return _minimize_neldermead(fun, x0, args, callback, **options)
     elif meth == 'powell':
@@ -345,7 +361,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
 
 def minimize_scalar(fun, bracket=None, bounds=None, args=(),
-                    method='brent', options=None):
+                    method='brent', tol=None, options=None):
     """
     Minimization of scalar function of one variable.
 
@@ -374,16 +390,14 @@ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
         - 'Brent'
         - 'Bounded'
        - 'Golden'
-
+    tol : float, optional
+        Tolerance for termination. For detailed control, use solver-specific
+        options.
     options : dict, optional
         A dictionary of solver options.
             xtol : float
                 Relative error in solution `xopt` acceptable for convergence.
-            ftol : float
-                Relative error in ``fun(xopt)`` acceptable for convergence.
-            gtol : float
-                Maximum gradient acceptable for convergence.
             maxiter : int
                 Maximum number of iterations to perform.
             disp : bool
@@ -445,6 +459,10 @@ def minimize_scalar(fun, bracket=None, bounds=None, args=(),
     if options is None:
         options = {}
 
+    if tol is not None:
+        options = dict(options)
+        options.setdefault('xtol', tol)
+
     if meth == 'brent':
         return _minimize_scalar_brent(fun, bracket, args, **options)
     elif meth == 'bounded':
```
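
For reference, the dispatch logic added in the first hunk can be read as a small standalone helper; the name `_tol_to_options` is hypothetical and is used here only to restate what `minimize()` does inline:

```python
def _tol_to_options(meth, tol, options=None):
    # Hypothetical helper restating the commit's mapping: a single tol
    # value seeds the solver-specific tolerances via setdefault(), so
    # explicitly supplied options are never overridden.
    options = dict(options or {})
    if tol is None:
        return options
    if meth in ['nelder-mead', 'newton-cg', 'powell', 'tnc']:
        options.setdefault('xtol', tol)
    if meth in ['nelder-mead', 'powell', 'anneal', 'l-bfgs-b', 'tnc',
                'slsqp']:
        options.setdefault('ftol', tol)
    if meth in ['bfgs', 'cg', 'l-bfgs-b', 'tnc']:
        options.setdefault('gtol', tol)
    if meth == 'cobyla':
        options.setdefault('tol', tol)
    return options

# e.g. _tol_to_options('l-bfgs-b', 1e-9, {'ftol': 1e-6}) keeps the
# explicit ftol and only fills in gtol.
```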
**scipy/optimize/optimize.py**

```diff
@@ -2328,6 +2328,8 @@ def show_options(solver, method=None):
             Initial set of direction vectors for the Powell method.
 
     * Anneal options:
+        ftol : float
+            Relative error in ``fun(x)`` acceptable for convergence.
         schedule : str
             Annealing schedule to use. One of: 'fast', 'cauchy' or
             'boltzmann'.
@@ -2345,8 +2347,6 @@ def show_options(solver, method=None):
             stringent test at each temperature).
         learn_rate : float
             Scale constant for adjusting guesses.
-        ftol : float
-            Relative error in ``fun(x)`` acceptable for convergence.
         quench, m, n : float
             Parameters to alter fast_sa schedule.
         lower, upper : float or ndarray
@@ -2355,22 +2355,35 @@ def show_options(solver, method=None):
             The number of times to search the space at each temperature.
 
     * L-BFGS-B options:
-        maxcor : int
-            The maximum number of variable metric corrections used to
-            define the limited memory matrix. (The limited memory BFGS
-            method does not store the full hessian but uses this many terms
-            in an approximation to it.)
         ftol : float
             The iteration stops when ``(f^k -
             f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
         gtol : float
             The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
             <= gtol`` where ``pg_i`` is the i-th component of the
             projected gradient.
+        maxcor : int
+            The maximum number of variable metric corrections used to
+            define the limited memory matrix. (The limited memory BFGS
+            method does not store the full hessian but uses this many terms
+            in an approximation to it.)
         maxiter : int
             Maximum number of function evaluations.
 
     * TNC options:
+        ftol : float
+            Precision goal for the value of f in the stoping criterion.
+            If ftol < 0.0, ftol is set to 0.0 defaults to -1.
+        xtol : float
+            Precision goal for the value of x in the stopping
+            criterion (after applying x scaling factors). If xtol <
+            0.0, xtol is set to sqrt(machine_precision). Defaults to
+            -1.
+        gtol : float
+            Precision goal for the value of the projected gradient in
+            the stopping criterion (after applying x scaling factors).
+            If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
+            Setting it to 0.0 is not recommended. Defaults to -1.
         scale : list of floats
             Scaling factors to apply to each variable. If None, the
             factors are up-low for interval bounded variables and
@@ -2399,34 +2412,23 @@ def show_options(solver, method=None):
             Defaults to 0.
         minfev : float
             Minimum function value estimate. Defaults to 0.
-        ftol : float
-            Precision goal for the value of f in the stoping criterion.
-            If ftol < 0.0, ftol is set to 0.0 defaults to -1.
-        xtol : float
-            Precision goal for the value of x in the stopping
-            criterion (after applying x scaling factors). If xtol <
-            0.0, xtol is set to sqrt(machine_precision). Defaults to
-            -1.
-        gtol : float
-            Precision goal for the value of the projected gradient in
-            the stopping criterion (after applying x scaling factors).
-            If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
-            Setting it to 0.0 is not recommended. Defaults to -1.
         rescale : float
             Scaling factor (in log10) used to trigger f value
             rescaling. If 0, rescale at each iteration. If a large
             value, never rescale. If < 0, rescale is set to 1.3.
 
     * COBYLA options:
-        rhobeg : float
-            Reasonable initial changes to the variables.
         tol : float
             Final accuracy in the optimization (not precisely guaranteed).
             This is a lower bound on the size of the trust region.
+        rhobeg : float
+            Reasonable initial changes to the variables.
         maxfev : int
             Maximum number of function evaluations.
 
     * SLSQP options:
+        ftol : float
+            Precision goal for the value of f in the stopping criterion.
         eps : float
             Step size used for numerical approximation of the jacobian.
         maxiter : int
```
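
The reordered `show_options()` docstring lists the tolerance options (`ftol`, `xtol`, `gtol`, `tol`) first for each solver, since these are the options `tol=` now feeds. When finer control is needed, explicit solver options still take precedence because the mapping uses `setdefault()`; a hedged sketch (the objective is illustrative, not part of the commit):

```python
from scipy.optimize import minimize

def rosen(z):
    return (1 - z[0])**2 + 100 * (z[1] - z[0]**2)**2  # illustrative objective

# tol= only fills in defaults; anything given explicitly in options=
# is left untouched (here the explicit ftol wins over the tol-derived one).
res = minimize(rosen, [0.0, 0.0], method='L-BFGS-B', tol=1e-6,
               options={'ftol': 1e-12, 'maxiter': 200})
```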
**scipy/optimize/slsqp.py**

```diff
@@ -209,6 +209,8 @@ def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
     Least SQuares Programming (SLSQP).
 
     Options for the SLSQP algorithm are:
+        ftol : float
+            Precision goal for the value of f in the stopping criterion.
         eps : float
             Step size used for numerical approximation of the jacobian.
         disp : bool
```
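
With this docstring addition, `ftol` is a documented SLSQP option; it can be set through `tol=` or explicitly in `options=`. A short sketch on a made-up constrained problem (not from the commit):

```python
from scipy.optimize import minimize

# Minimize x^2 + y^2 subject to x + y >= 1 (illustrative problem).
cons = ({'type': 'ineq', 'fun': lambda z: z[0] + z[1] - 1},)
res = minimize(lambda z: z[0]**2 + z[1]**2, [2.0, 0.0],
               method='SLSQP', constraints=cons, options={'ftol': 1e-9})
# Expect a solution near [0.5, 0.5].
```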
**scipy/optimize/tests/test_optimize.py**

```diff
@@ -440,6 +440,26 @@ def test_minimize(self):
         self.setUp()
         self.test_powell(True)
 
+    def test_minimize_tol_parameter(self):
+        # Check that the minimize() tol= argument does something
+        def func(z):
+            x, y = z
+            return x**2*y**2 + x**4 + 1
+        def jac(z):
+            x, y = z
+            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
+
+        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
+                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
+                       'cobyla', 'slsqp']:
+            sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
+                                     method=method)
+            sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
+                                     method=method)
+            assert_(func(sol1.x) < func(sol2.x),
+                    "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
+
+
 class TestLBFGSBBounds(TestCase):
     """ Tests for L-BFGS-B with bounds """
     def setUp(self):
```
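
The test checks, for every supported method, that a tight `tol` reaches a lower objective value than a loose one. To exercise it locally, something along these lines should work (SciPy's suite was nose-based at the time, so the exact invocation is an assumption):

```python
# From a source checkout:
#   python -m nose scipy/optimize/tests/test_optimize.py
# Or, from an installed SciPy, run the whole optimize test suite:
import scipy.optimize
scipy.optimize.test(verbose=2)
```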