ENH: drop full_output parameter from minimize

`minimize` and the underlying `_minimize_METHOD` functions now always return a
tuple containing the solution and the `info` dictionary.
commit 4aa309e08b17958a52759ccfafdb003f21f528a4 (1 parent: 094adeb), authored by @dlax
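For orientation, a minimal usage sketch of the convention this commit establishes (illustration only, not part of the diff): `minimize` and every `_minimize_METHOD` helper now return the `(xopt, info)` tuple unconditionally, so callers either unpack it or index element 0. The sketch assumes `rosen` and `rosen_der` from `scipy.optimize` and the commit-era API; released SciPy versions may differ.

    from scipy.optimize import minimize, rosen, rosen_der

    x0 = [1.3, 0.7, 0.8, 1.9, 1.2]

    # Extra outputs no longer require full_output=True; the tuple is always returned.
    xopt, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
                          options={'gtol': 1e-6, 'disp': False})
    assert info['success'], info['message']

    # Callers that only want the solution index the first element.
    xsol = minimize(rosen, x0, method='Nelder-Mead')[0]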
scipy/optimize/_minimize.py (43 changed lines)
@@ -29,8 +29,7 @@
def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
hessp=None, bounds=None, constraints=(),
- options=dict(), full_output=False, callback=None,
- retall=False):
+ options=dict(), callback=None, retall=False):
"""
Minimization of scalar function of one or more variables.
@@ -92,8 +91,6 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
disp : bool
Set to True to print convergence messages.
For method-specific options, see `show_minimize_options`.
- full_output : bool, optional
- If True, return optional outputs. Default is False.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
@@ -106,7 +103,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
xopt : ndarray
The solution.
info : dict
- A dictionary of optional outputs (depending on the chosen method)
+ A dictionary of extra outputs (depending on the chosen method)
with the keys:
solution : ndarray
The solution (same as `xopt`).
@@ -249,7 +246,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
- >>> xopt = minimize(rosen, x0, method='Nelder-Mead')
+ >>> xopt = minimize(rosen, x0, method='Nelder-Mead')[0]
Optimization terminated successfully.
Current function value: 0.000066
Iterations: 141
@@ -261,8 +258,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
options:
>>> xopt, info = minimize(rosen, x0, method='BFGS', jac=rosen_der,
- ... options={'gtol': 1e-6, 'disp': False},
- ... full_output=True)
+ ... options={'gtol': 1e-6, 'disp': False})
>>> print info['message']
Optimization terminated successfully.
@@ -294,7 +290,7 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
The optimization problem is solved using the SLSQP method as:
>>> xopt, info = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
- ... constraints=cons, full_output=True)
+ ... constraints=cons)
It should converge to the theoretical solution (1.4 ,1.7).
"""
@@ -339,34 +335,29 @@ def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None,
jac = None
if meth == 'nelder-mead':
- return _minimize_neldermead(fun, x0, args, options, full_output,
- retall, callback)
+ return _minimize_neldermead(fun, x0, args, options, retall,
+ callback)
elif meth == 'powell':
- return _minimize_powell(fun, x0, args, options, full_output,
- retall, callback)
+ return _minimize_powell(fun, x0, args, options, retall, callback)
elif meth == 'cg':
- return _minimize_cg(fun, x0, args, jac, options, full_output,
- retall, callback)
+ return _minimize_cg(fun, x0, args, jac, options, retall, callback)
elif meth == 'bfgs':
- return _minimize_bfgs(fun, x0, args, jac, options, full_output,
- retall, callback)
+ return _minimize_bfgs(fun, x0, args, jac, options, retall,
+ callback)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, options,
- full_output, retall, callback)
+ retall, callback)
elif meth == 'anneal':
- return _minimize_anneal(fun, x0, args, options, full_output)
+ return _minimize_anneal(fun, x0, args, options)
elif meth == 'l-bfgs-b':
- return _minimize_lbfgsb(fun, x0, args, jac, bounds, options,
- full_output)
+ return _minimize_lbfgsb(fun, x0, args, jac, bounds, options)
elif meth == 'tnc':
- return _minimize_tnc(fun, x0, args, jac, bounds, options,
- full_output)
+ return _minimize_tnc(fun, x0, args, jac, bounds, options)
elif meth == 'cobyla':
- return _minimize_cobyla(fun, x0, args, constraints, options,
- full_output)
+ return _minimize_cobyla(fun, x0, args, constraints, options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
- constraints, options, full_output)
+ constraints, options)
else:
raise ValueError('Unknown solver %s' % method)
scipy/optimize/anneal.py (41 changed lines)
@@ -303,9 +303,7 @@ def anneal(func, x0, args=(), schedule='fast', full_output=0,
'dwell' : dwell,
'disp' : disp}
- # call _minimize_anneal full_output=True in order to always retrieve
- # retval (aka info['status'])
- x, info = _minimize_anneal(func, x0, args, opts, full_output=True)
+ x, info = _minimize_anneal(func, x0, args, opts)
if full_output:
return x, info['fun'], info['T'], info['nfev'], info['nit'], \
@@ -313,7 +311,7 @@ def anneal(func, x0, args=(), schedule='fast', full_output=0,
else:
return x, info['status']
-def _minimize_anneal(func, x0, args=(), options={}, full_output=0):
+def _minimize_anneal(func, x0, args=(), options={}):
"""
Minimization of scalar function of one or more variables using the
simulated annealing algorithm.
@@ -449,25 +447,22 @@ def _minimize_anneal(func, x0, args=(), options={}, full_output=0):
retval = 4
break
- if full_output:
- info = {'solution': best_state.x,
- 'fun' : best_state.cost,
- 'T' : schedule.T,
- 'nfev' : schedule.feval,
- 'nit' : iters,
- 'accept' : schedule.accepted,
- 'status' : retval,
- 'success' : retval <= 1}
- info['message'] = {0: 'Points no longer changing',
- 1: 'Cooled to final temperature',
- 2: 'Maximum function evaluations',
- 3: 'Maximum cooling iterations reached',
- 4: 'Maximum accepted query locations reached',
- 5: 'Final point not the minimum amongst '
- 'encountered points'}[retval]
- return best_state.x, info
- else:
- return best_state.x
+ info = {'solution': best_state.x,
+ 'fun' : best_state.cost,
+ 'T' : schedule.T,
+ 'nfev' : schedule.feval,
+ 'nit' : iters,
+ 'accept' : schedule.accepted,
+ 'status' : retval,
+ 'success' : retval <= 1}
+ info['message'] = {0: 'Points no longer changing',
+ 1: 'Cooled to final temperature',
+ 2: 'Maximum function evaluations',
+ 3: 'Maximum cooling iterations reached',
+ 4: 'Maximum accepted query locations reached',
+ 5: 'Final point not the minimum amongst '
+ 'encountered points'}[retval]
+ return best_state.x, info
scipy/optimize/cobyla.py (14 changed lines)
@@ -159,11 +159,10 @@ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
'disp' : iprint != 0,
'maxfev': maxfun}
- return _minimize_cobyla(func, x0, args, constraints=con, options=opts,
- full_output=False)
+ return _minimize_cobyla(func, x0, args, constraints=con,
+ options=opts)[0]
-def _minimize_cobyla(fun, x0, args=(), constraints=(), options={},
- full_output=False):
+def _minimize_cobyla(fun, x0, args=(), constraints=(), options={}):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
@@ -232,12 +231,7 @@ def calcfc(x, con):
xopt = _cobyla.minimize(calcfc, m=m, x=copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun)
- if full_output:
- warn('COBYLA does not handle full_output parameter.',
- RuntimeWarning)
- return xopt, dict()
- else:
- return xopt
+ return xopt, dict()
if __name__ == '__main__':
scipy/optimize/lbfgsb.py (24 changed lines)
@@ -161,7 +161,7 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
'maxfev': maxfun}
x, info = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
- options=opts, full_output=True)
+ options=opts)
d = {'grad': info['jac'],
'task': info['message'],
'funcalls': info['nfev'],
@@ -170,8 +170,7 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
return x, f, d
-def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, options={},
- full_output=False):
+def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, options={}):
"""
Minimize a scalar function of one or more variables using the L-BFGS-B
algorithm.
@@ -303,17 +302,14 @@ def func_and_grad(x):
'warnflag' : warnflag
}
- if full_output:
- info = {'fun': f,
- 'jac': g,
- 'nfev': n_function_evals,
- 'status': warnflag,
- 'message': task_str,
- 'solution': x,
- 'success': warnflag==0}
- return x, info
- else:
- return x
+ info = {'fun': f,
+ 'jac': g,
+ 'nfev': n_function_evals,
+ 'status': warnflag,
+ 'message': task_str,
+ 'solution': x,
+ 'success': warnflag==0}
+ return x, info
if __name__ == '__main__':
scipy/optimize/optimize.py (222 changed lines)
@@ -295,28 +295,21 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
'maxfev': maxfun,
'disp': disp}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_neldermead(func, x0, args, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_neldermead(func, x0, args, opts, full_output,
- retall, callback)
+ x, info = _minimize_neldermead(func, x0, args, opts, retall=retall,
+ callback=callback)
if full_output:
- x, info = out
retlist = x, info['fun'], info['nit'], info['nfev'], info['status']
if retall:
retlist += (info['allvecs'], )
return retlist
else:
if retall:
- x, info = out
return x, info['allvecs']
else:
- return out
+ return x
-def _minimize_neldermead(func, x0, args=(), options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_neldermead(func, x0, args=(), options={}, retall=0,
+ callback=None):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
@@ -472,19 +465,16 @@ def _minimize_neldermead(func, x0, args=(), options={}, full_output=0,
print " Function evaluations: %d" % fcalls[0]
- if full_output:
- info = {'fun': fval,
- 'nit': iterations,
- 'nfev': fcalls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': x}
- if retall:
- info['allvecs'] = allvecs
- return x, info
- else:
- return x
+ info = {'fun': fval,
+ 'nit': iterations,
+ 'nfev': fcalls[0],
+ 'status': warnflag,
+ 'success': warnflag == 0,
+ 'message': msg,
+ 'solution': x}
+ if retall:
+ info['allvecs'] = allvecs
+ return x, info
def approx_fprime(xk, f, epsilon, *args):
@@ -670,16 +660,10 @@ def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
'disp': disp,
'maxiter': maxiter}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_bfgs(f, x0, args, fprime, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_bfgs(f, x0, args, fprime, opts, full_output,
- retall, callback)
+ x, info = _minimize_bfgs(f, x0, args, fprime, opts, retall=retall,
+ callback=callback)
if full_output:
- x, info = out
retlist = x, info['fun'], info['jac'], info['hess'], \
info['nfev'], info['njev'], info['status']
if retall:
@@ -687,13 +671,12 @@ def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
return retlist
else:
if retall:
- x, info = out
return x, info['allvecs']
else:
- return out
+ return x
-def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, retall=0,
+ callback=None):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
@@ -799,8 +782,7 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * sk[:, numpy.newaxis] \
* sk[numpy.newaxis, :]
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
@@ -828,21 +810,18 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, options={}, full_output=0,
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'hess': Hk,
- 'nfev': func_calls[0],
- 'njev': grad_calls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ info = {'fun': fval,
+ 'jac': gfk,
+ 'hess': Hk,
+ 'nfev': func_calls[0],
+ 'njev': grad_calls[0],
+ 'status': warnflag,
+ 'success': warnflag == 0,
+ 'message': msg,
+ 'solution': xk}
+ if retall:
+ info['allvecs'] = allvecs
+ return xk, info
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
@@ -920,28 +899,22 @@ def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
'disp': disp,
'maxiter': maxiter}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_cg(f, x0, args, fprime, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_cg(f, x0, args, fprime, opts, full_output, retall,
- callback)
+ x, info = _minimize_cg(f, x0, args, fprime, opts, retall=retall,
+ callback=callback)
+
if full_output:
- x, info = out
retlist = x, info['fun'], info['nfev'], info['njev'], info['status']
if retall:
retlist += (info['allvecs'], )
return retlist
else:
if retall:
- x, info = out
return x, info['allvecs']
else:
- return out
+ return x
-def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_cg(fun, x0, args=(), jac=None, options={}, retall=0,
+ callback=None):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
@@ -1025,8 +998,7 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
k += 1
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
@@ -1055,20 +1027,17 @@ def _minimize_cg(fun, x0, args=(), jac=None, options={}, full_output=0,
print " Gradient evaluations: %d" % grad_calls[0]
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'nfev': func_calls[0],
- 'njev': grad_calls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ info = {'fun': fval,
+ 'jac': gfk,
+ 'nfev': func_calls[0],
+ 'njev': grad_calls[0],
+ 'status': warnflag,
+ 'success': warnflag == 0,
+ 'message': msg,
+ 'solution': xk}
+ if retall:
+ info['allvecs'] = allvecs
+ return xk, info
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
@@ -1168,17 +1137,10 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
'maxiter': maxiter,
'disp': disp}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
- full_output=True, retall=retall,
- callback=callback)
- else:
- out = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
- full_output, retall, callback)
+ x, info = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, opts,
+ retall=retall, callback=callback)
if full_output:
- x, info = out
retlist = x, info['fun'], info['nfev'], info['njev'], \
info['nhev'], info['status']
if retall:
@@ -1186,13 +1148,12 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
return retlist
else:
if retall:
- x, info = out
return x, info['allvecs']
else:
- return out
+ return x
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
- options={}, full_output=0, retall=0, callback=None):
+ options={}, retall=0, callback=None):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
@@ -1297,8 +1258,7 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
allvecs.append(xk)
k += 1
- if disp or full_output:
- fval = old_fval
+ fval = old_fval
if k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
@@ -1320,21 +1280,18 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
print " Gradient evaluations: %d" % gcalls[0]
print " Hessian evaluations: %d" % hcalls
- if full_output:
- info = {'fun': fval,
- 'jac': gfk,
- 'nfev': fcalls[0],
- 'njev': gcalls[0],
- 'nhev': hcalls,
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': xk}
- if retall:
- info['allvecs'] = allvecs
- return xk, info
- else:
- return xk
+ info = {'fun': fval,
+ 'jac': gfk,
+ 'nfev': fcalls[0],
+ 'njev': gcalls[0],
+ 'nhev': hcalls,
+ 'status': warnflag,
+ 'success': warnflag == 0,
+ 'message': msg,
+ 'solution': xk}
+ if retall:
+ info['allvecs'] = allvecs
+ return xk, info
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
@@ -2047,15 +2004,10 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
'disp': disp,
'direc': direc}
- # force full_output if retall=True to preserve backwards compatibility
- if retall and not full_output:
- out = _minimize_powell(func, x0, args, opts, full_output=True,
- retall=retall, callback=callback)
- else:
- out = _minimize_powell(func, x0, args, opts, full_output, retall,
- callback)
+ x, info = _minimize_powell(func, x0, args, opts, retall=retall,
+ callback=callback)
+
if full_output:
- x, info = out
retlist = x, info['fun'], info['direc'], info['nit'], \
info['nfev'], info['status']
if retall:
@@ -2063,13 +2015,12 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
return retlist
else:
if retall:
- x, info = out
return x, info['allvecs']
else:
- return out
+ return x
-def _minimize_powell(func, x0, args=(), options={}, full_output=0,
- retall=0, callback=None):
+def _minimize_powell(func, x0, args=(), options={}, retall=0,
+ callback=None):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
@@ -2182,20 +2133,17 @@ def _minimize_powell(func, x0, args=(), options={}, full_output=0,
x = squeeze(x)
- if full_output:
- info = {'fun': fval,
- 'direc': direc,
- 'nit': iter,
- 'nfev': fcalls[0],
- 'status': warnflag,
- 'success': warnflag == 0,
- 'message': msg,
- 'solution': x}
- if retall:
- info['allvecs'] = allvecs
- return x, info
- else:
- return x
+ info = {'fun': fval,
+ 'direc': direc,
+ 'nit': iter,
+ 'nfev': fcalls[0],
+ 'status': warnflag,
+ 'success': warnflag == 0,
+ 'message': msg,
+ 'solution': x}
+ if retall:
+ info['allvecs'] = allvecs
+ return x, info
def _endprint(x, flag, fval, maxfun, xtol, disp):
scipy/optimize/slsqp.py (33 changed lines)
@@ -184,17 +184,15 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
'args': args}, )
- out = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
- constraints=cons, options=opts,
- full_output=full_output)
+ x, info = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
+ constraints=cons, options=opts)
if full_output:
- x, info = out
return x, info['fun'], info['nit'], info['status'], info['message']
else:
- return out
+ return x
def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
- constraints=(), options={}, full_output=False):
+ constraints=(), options={}):
"""
Minimize a scalar function of one or more variables using Sequential
Least SQuares Programming (SLSQP).
@@ -405,19 +403,16 @@ def cjac(x, *args):
print " Function evaluations:", feval[0]
print " Gradient evaluations:", geval[0]
- if not full_output:
- return x
- else:
- info = {'solution': x,
- 'fun' : fx,
- 'jac' : g,
- 'nit' : int(majiter),
- 'nfev' : feval[0],
- 'njev' : geval[0],
- 'status' : int(mode),
- 'message' : exit_modes[int(mode)],
- 'success' : mode == 0}
- return x, info
+ info = {'solution': x,
+ 'fun' : fx,
+ 'jac' : g,
+ 'nit' : int(majiter),
+ 'nfev' : feval[0],
+ 'njev' : geval[0],
+ 'status' : int(mode),
+ 'message' : exit_modes[int(mode)],
+ 'success' : mode == 0}
+ return x, info
if __name__ == '__main__':
scipy/optimize/tests/test_cobyla.py (2 changed lines)
@@ -31,7 +31,7 @@ def test_minimize_simple(self):
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
x = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(x, self.solution, atol=1e-4)
if __name__ == "__main__":
scipy/optimize/tests/test_optimize.py (35 changed lines)
@@ -71,7 +71,6 @@ def test_cg(self, use_wrapper=False):
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='CG',
jac=self.grad, options=opts,
- full_output=True,
retall=False)
fopt, func_calls, grad_calls, warnflag = \
@@ -105,7 +104,6 @@ def test_bfgs(self, use_wrapper=False):
params, info = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS',
args=(), options=opts,
- full_output=True,
retall=False)
fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
@@ -166,7 +164,7 @@ def test_bfgs_infinite(self, use_wrapper=False):
if use_wrapper:
opts = {'disp': False}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
- args=(), options=opts)
+ args=(), options=opts)[0]
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(not np.isfinite(func(x)))
@@ -182,7 +180,6 @@ def test_powell(self, use_wrapper=False):
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Powell',
options=opts,
- full_output=True,
retall=False)
fopt, direc, numiter, func_calls, warnflag = \
info['fun'], info['direc'], info['nit'], info['nfev'], \
@@ -226,7 +223,6 @@ def test_neldermead(self, use_wrapper=False):
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Nelder-mead',
options=opts,
- full_output=True,
retall=False)
fopt, numiter, func_calls, warnflag = \
info['fun'], info['nit'], info['nfev'], info['status']
@@ -259,7 +255,7 @@ def test_ncg(self, use_wrapper=False):
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts,
- full_output=False, retall=False)
+ retall=False)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
@@ -292,7 +288,7 @@ def test_ncg_hess(self, use_wrapper=False):
method='Newton-CG', jac=self.grad,
hess = self.hess,
args=(), options=opts,
- full_output=False, retall=False)
+ retall=False)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
@@ -326,7 +322,7 @@ def test_ncg_hessp(self, use_wrapper=False):
method='Newton-CG', jac=self.grad,
hessp = self.hessp,
args=(), options=opts,
- full_output=False, retall=False)
+ retall=False)[0]
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
@@ -405,7 +401,7 @@ def test_minimize_l_bfgs_b(self):
opts = {'disp': False, 'maxiter': self.maxiter}
x = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
- options=opts)
+ options=opts)[0]
assert_allclose(self.func(x), self.func(self.solution),
atol=1e-6)
@@ -461,8 +457,7 @@ def test_l_bfgs_b_funjac(self):
def test_minimize_l_bfgs_b_bounds(self):
""" Minimize with method='L-BFGS-B' with bounds """
x, info = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
- jac=self.jac, bounds=self.bounds,
- full_output=True)
+ jac=self.jac, bounds=self.bounds)
assert_(info['success'], info['message'])
assert_allclose(x, self.solution, atol=1e-6)
@@ -688,7 +683,7 @@ def test_minimize_tnc1(self):
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc1b(self):
@@ -696,7 +691,7 @@ def test_minimize_tnc1b(self):
x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x = optimize.minimize(self.f1, x0, method='TNC',
- bounds=bnds, options=self.opts)
+ bounds=bnds, options=self.opts)[0]
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)
def test_minimize_tnc1c(self):
@@ -705,7 +700,7 @@ def test_minimize_tnc1c(self):
xopt = [1, 1]
x = optimize.minimize(self.fg1, x0, method='TNC',
jac=True, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc2(self):
@@ -714,7 +709,7 @@ def test_minimize_tnc2(self):
xopt = [-1.2210262419616387, 1.5]
x = optimize.minimize(self.f1, x0, method='TNC',
jac=self.g1, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
def test_minimize_tnc3(self):
@@ -723,7 +718,7 @@ def test_minimize_tnc3(self):
xopt = [0, 0]
x = optimize.minimize(self.f3, x0, method='TNC',
jac=self.g3, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)
def test_minimize_tnc4(self):
@@ -732,7 +727,7 @@ def test_minimize_tnc4(self):
xopt = [1, 0]
x = optimize.minimize(self.f4, x0, method='TNC',
jac=self.g4, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)
def test_minimize_tnc5(self):
@@ -741,7 +736,7 @@ def test_minimize_tnc5(self):
xopt = [-0.54719755119659763, -1.5471975511965976]
x = optimize.minimize(self.f5, x0, method='TNC',
jac=self.g5, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)
def test_minimize_tnc38(self):
@@ -750,7 +745,7 @@ def test_minimize_tnc38(self):
xopt = [1]*4
x = optimize.minimize(self.f38, x0, method='TNC',
jac=self.g38, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)
def test_minimize_tnc45(self):
@@ -759,7 +754,7 @@ def test_minimize_tnc45(self):
xopt = [1, 2, 3, 4, 5]
x = optimize.minimize(self.f45, x0, method='TNC',
jac=self.g45, bounds=bnds,
- options=self.opts)
+ options=self.opts)[0]
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)
# fmin_tnc
scipy/optimize/tests/test_slsqp.py (22 changed lines)
@@ -78,16 +78,14 @@ def fprime_ieqcon2(self, x):
def test_minimize_unbounded_approximated(self):
""" Minimize, method='SLSQP': unbounded, approximated jacobian. """
x, info = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
- method='SLSQP', options=self.opts,
- full_output=True)
+ method='SLSQP', options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [2, 1])
def test_minimize_unbounded_given(self):
""" Minimize, method='SLSQP': unbounded, given jacobian. """
x, info = minimize(self.fun, [-1.0, 1.0], args = (-1.0, ),
- jac=self.jac, method='SLSQP', options=self.opts,
- full_output=True)
+ jac=self.jac, method='SLSQP', options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [2, 1])
@@ -96,8 +94,7 @@ def test_minimize_unbounded_combined(self):
Minimize, method='SLSQP': unbounded, combined function and jacobian.
"""
x, info = minimize(self.fun_and_jac, [-1.0, 1.0], args = (-1.0, ),
- jac=True, method='SLSQP',
- options=self.opts, full_output=True)
+ jac=True, method='SLSQP', options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [2, 1])
@@ -109,8 +106,7 @@ def test_minimize_equality_approximated(self):
constraints={'type': 'eq',
'fun': self.f_eqcon,
'args': (-1.0, )},
- method='SLSQP', options=self.opts,
- full_output=True)
+ method='SLSQP', options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [1, 1])
@@ -122,7 +118,7 @@ def test_minimize_equality_given(self):
method='SLSQP', args=(-1.0,),
constraints={'type': 'eq', 'fun':self.f_eqcon,
'args': (-1.0, )},
- options=self.opts, full_output=True)
+ options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [1, 1])
@@ -137,7 +133,7 @@ def test_minimize_equality_given2(self):
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
- options=self.opts, full_output=True)
+ options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [1, 1])
@@ -150,7 +146,7 @@ def test_minimize_inequality_given(self):
constraints={'type': 'ineq',
'fun': self.f_ieqcon,
'args': (-1.0, )},
- options=self.opts, full_output=True)
+ options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [2, 1], atol=1e-3)
@@ -163,7 +159,7 @@ def test_minimize_inequality_given_vector_constraints(self):
constraints={'type': 'ineq',
'fun': self.f_ieqcon2,
'jac': self.fprime_ieqcon2},
- options=self.opts, full_output=True)
+ options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [2, 1])
@@ -179,7 +175,7 @@ def test_minimize_bound_equality_given2(self):
'fun': self.f_eqcon,
'args': (-1.0, ),
'jac': self.fprime_eqcon},
- options=self.opts, full_output=True)
+ options=self.opts)
assert_(info['success'], info['message'])
assert_allclose(x, [0.8, 0.8], atol=1e-3)
scipy/optimize/tnc.py (27 changed lines)
@@ -254,13 +254,11 @@ def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
'rescale': rescale,
'disp': False}
- x, info = _minimize_tnc(fun, x0, args, jac, bounds, options=opts,
- full_output=True)
+ x, info = _minimize_tnc(fun, x0, args, jac, bounds, options=opts)
return x, info['nfev'], info['status']
-def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, options={},
- full_output=False):
+def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, options={}):
"""
Minimize a scalar function of one or more variables using a truncated
Newton (TNC) algorithm.
@@ -400,18 +398,15 @@ def func_and_grad(x):
fmin, ftol, xtol, pgtol, rescale)
xopt = array(x)
- if full_output:
- funv, jacv = func_and_grad(xopt)
- info = {'solution': xopt,
- 'fun': funv,
- 'jac': jacv,
- 'nfev': nf,
- 'status': rc,
- 'message': RCSTRINGS[rc],
- 'success': -1 < rc < 3}
- return xopt, info
- else:
- return xopt
+ funv, jacv = func_and_grad(xopt)
+ info = {'solution': xopt,
+ 'fun': funv,
+ 'jac': jacv,
+ 'nfev': nf,
+ 'status': rc,
+ 'message': RCSTRINGS[rc],
+ 'success': -1 < rc < 3}
+ return xopt, info
if __name__ == '__main__':
# Examples for TNC