
DOC: merge wiki edits for optimize module.

1 parent 35807da · commit 9bb3d0515a03f4def2c6f728874af213708ccfce · rgommers committed Nov 14, 2010
134 scipy/optimize/__init__.py
@@ -1,3 +1,137 @@
+"""
+Optimization Tools
+==================
+
+General-purpose Optimization Routines
+-------------------------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fmin - Nelder-Mead Simplex algorithm #
+ fmin_powell - Powell's (modified) direction set method #
+ fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm ##
+ fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno) ##
+ fmin_ncg - Line-search Newton Conjugate Gradient ###
+ leastsq - Minimize the sum of squares of M equations in N unknowns
+
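+For example, `fmin` can minimize a smooth function of several variables
+(illustrative sketch; the result in the comment is approximate)::
+
+ >>> from scipy.optimize import fmin
+ >>> xopt = fmin(lambda x: (x[0] - 1.0)**2 + (x[1] - 2.5)**2, [2.0, 2.0],
+ ... disp=False)
+ >>> # xopt is approximately array([ 1. , 2.5])
+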
+Constrained Optimizers (Multivariate)
+-------------------------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer %
+ fmin_tnc - Truncated Newton code %%
+ fmin_cobyla - Constrained optimization by linear approximation
+ fmin_slsqp - Minimization using sequential least-squares programming
+ nnls - Linear least-squares problem with non-negativity constraint
+
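+For example, a sketch of bound-constrained minimization with
+`fmin_l_bfgs_b`, using a numerically approximated gradient (illustrative;
+the result in the comment is approximate)::
+
+ >>> from scipy.optimize import fmin_l_bfgs_b
+ >>> x, f, info = fmin_l_bfgs_b(lambda x: (x[0] - 1.0)**2, [4.0],
+ ... approx_grad=True, bounds=[(2.0, 5.0)])
+ >>> # the lower bound is active, so x is approximately array([ 2.])
+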
+Global Optimizers
+-----------------
+
+.. autosummary::
+ :toctree: generated/
+
+ anneal - Simulated annealing
+ brute - Brute force searching optimizer
+
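+For example, `brute` evaluates the function on a grid spanning the given
+ranges, then polishes the best grid point (illustrative; the result in the
+comment is approximate)::
+
+ >>> from scipy.optimize import brute
+ >>> xmin = brute(lambda x: (x[0] - 0.5)**2 + (x[1] + 0.2)**2,
+ ... ((-1, 1), (-1, 1)))
+ >>> # xmin is approximately array([ 0.5, -0.2])
+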
+Scalar Function Minimizers
+--------------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fminbound - Bounded minimization of a scalar function
+ brent - 1-D function minimization using Brent's method
+ golden - 1-D function minimization using the golden section method
+ bracket - Bracket a minimum, given two starting points
+
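+For example, `fminbound` minimizes a scalar function over an interval
+(illustrative; the result in the comment is approximate)::
+
+ >>> from scipy.optimize import fminbound
+ >>> xmin = fminbound(lambda x: (x - 2.0)**2, 0.0, 4.0)
+ >>> # xmin is approximately 2.0
+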
+General-purpose Root-finding Routines
+-------------------------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fsolve - Non-linear multi-variable equation solver
+
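+For example, `fsolve` solves a system of non-linear equations from a
+starting estimate (illustrative; the result in the comment is
+approximate)::
+
+ >>> from scipy.optimize import fsolve
+ >>> sol = fsolve(lambda x: [x[0] + x[1] - 3.0, x[0] - x[1] - 1.0],
+ ... [0.0, 0.0])
+ >>> # sol is approximately array([ 2., 1.])
+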
+Scalar Function Solvers
+-----------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ brentq - Brent's method using inverse quadratic interpolation
+ brenth - Brent method, modified by Harris with hyperbolic extrapolation
+ ridder - Ridder's method
+ bisect - Bisection method
+ newton - Secant method or Newton's method
+ fixed_point - Single-variable fixed-point solver
+
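+For example, `brentq` finds a root inside a bracket [a, b] over which the
+function changes sign (illustrative; the result in the comment is
+approximate)::
+
+ >>> from scipy.optimize import brentq
+ >>> root = brentq(lambda x: x**2 - 2.0, 0.0, 2.0)
+ >>> # root is approximately 1.4142, i.e. sqrt(2)
+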
+General-purpose Non-linear Multidimensional Solvers
+---------------------------------------------------
+
+.. autosummary::
+ :toctree: generated/
+
+ broyden1 - Broyden's first method $
+ broyden2 - Broyden's second method $$
+ broyden3 - Broyden's third method $$$
+ broyden_generalized - Generalized Broyden's method &
+ anderson - Extended Anderson method &&
+ anderson2 - The Anderson method &&&
+
+Utility Functions
+-----------------
+
+.. autosummary::
+ :toctree: generated/
+
+ line_search - Return a step that satisfies the strong Wolfe conditions
+ check_grad - Check the supplied derivative using finite differences
+
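+For example, `check_grad` compares an analytical gradient against a
+finite-difference estimate (illustrative; the value in the comment is
+approximate)::
+
+ >>> import numpy as np
+ >>> from scipy.optimize import check_grad
+ >>> err = check_grad(lambda x: x[0]**2,
+ ... lambda x: np.array([2.0 * x[0]]),
+ ... np.array([1.5]))
+ >>> # err is approximately 0 for a correct gradient
+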
+Related Software
+----------------
+
+OpenOpt - A BSD-licensed optimization framework (see `<http://openopt.org>`_)
+that includes a number of constrained and unconstrained solvers from and
+beyond the scipy.optimize module, unified text and graphical output of
+convergence information, and automatic differentiation.
+
+Notes
+-----
+# Uses only function calls
+
+## Can use function and gradient
+
+### Can use function, gradient, and Hessian
+
+% If you use fmin_l_bfgs_b, please cite Zhu, Byrd, and Nocedal's papers; see the function's docstring for references.
+
+%% Originally written by Stephen Nash, adapted to C by Jean-Sebastien Roy.
+
+$ broyden1 is a quasi-Newton-Raphson method for updating an approximate
+Jacobian and then inverting it.
+
+$$ broyden2 is the same as broyden1, but updates the inverse Jacobian
+directly.
+
+$$$ broyden3 is the same as broyden2, but instead of directly computing
+the inverse Jacobian, it remembers how to construct it using vectors, and
+when computing inv(J)*F, it uses those vectors to compute this product,
+thus avoiding the expensive NxN matrix multiplication.
+
+& broyden_generalized is the same as broyden2, but instead of
+approximating the full NxN Jacobian, it constructs it at every iteration
+in a way that avoids the NxN matrix multiplication. This is not as
+precise as broyden3.
+
+&& anderson is the same as broyden_generalized, but (w_0**2)*I is added
+before taking the inverse, to improve stability.
+
+&&& anderson2 is the same as anderson, but formulated differently.
+
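+An illustrative sketch of `broyden1` on a small non-linear system (the
+comment describes the approximate outcome)::
+
+ >>> from scipy.optimize import broyden1
+ >>> def F(x):
+ ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+ ... 0.5 * (x[1] - x[0])**3 + x[1]]
+ >>> sol = broyden1(F, [0.0, 0.0])
+ >>> # sol approximately satisfies F(sol) == [0, 0]
+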
+"""
#
# optimize - Optimization Tools
#
33 scipy/optimize/cobyla.py
@@ -18,13 +18,14 @@ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
Parameters
----------
- func : callable f(x, *args)
- Function to minimize.
+ func : callable
+ Function to minimize, called as ``func(x, \\*args)``.
x0 : ndarray
Initial guess.
cons : sequence
Constraint functions; must all be ``>=0`` (a single function
- if only 1 constraint).
+ if only 1 constraint). Each function takes the parameters `x`
+ as its first argument.
args : tuple
Extra arguments to pass to function.
consargs : tuple
@@ -36,7 +37,7 @@ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
rhoend :
Final accuracy in the optimization (not precisely guaranteed).
iprint : {0, 1, 2, 3}
- Controls the frequency of output; 0 implies no output. Deprecated
+ Controls the frequency of output; 0 implies no output. Deprecated.
disp : {0, 1, 2, 3}
Over-rides the iprint interface. Preferred.
maxfun : int
@@ -47,6 +48,30 @@ def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
x : ndarray
The argument that minimises `f`.
+ Examples
+ --------
+ Minimize the objective function f(x,y) = x*y subject
+ to the constraints x**2 + y**2 < 1 and y > 0::
+
+ >>> def objective(x):
+ ... return x[0]*x[1]
+ ...
+ >>> def constr1(x):
+ ... return 1 - (x[0]**2 + x[1]**2)
+ ...
+ >>> def constr2(x):
+ ... return x[1]
+ ...
+ >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
+
+ Normal return from subroutine COBYLA
+
+ NFVALS = 64 F =-5.000000E-01 MAXCV = 1.998401E-14
+ X =-7.071069E-01 7.071067E-01
+ array([-0.70710685, 0.70710671])
+
+ The exact solution is (-sqrt(2)/2, sqrt(2)/2).
+
"""
err = "cons must be a sequence of callable functions or a single"\
" callable function."
52 scipy/optimize/lbfgsb.py
@@ -79,8 +79,9 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
calculating the gradient
iprint : int
Controls the frequency of output. ``iprint < 0`` means no output.
- disp : int
- If zero, then no output. If positive number, then this over-rides iprint
+ disp : int, optional
+ If zero, then no output. If a positive number, then this overrides
+ `iprint`.
maxfun : int
Maximum number of function evaluations.
@@ -93,35 +94,34 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
d : dict
Information dictionary.
- d['warnflag'] is
- 0 if converged,
- 1 if too many function evaluations,
- 2 if stopped for another reason, given in d['task']
- d['grad'] is the gradient at the minimum (should be 0 ish)
- d['funcalls'] is the number of function calls made.
+ * d['warnflag'] is
+ - 0 if converged,
+ - 1 if too many function evaluations,
+ - 2 if stopped for another reason, given in d['task']
+ * d['grad'] is the gradient at the minimum (should be close to 0)
+ * d['funcalls'] is the number of function calls made.
- Notes
- -----
+ Notes
+ -----
+ License of L-BFGS-B (Fortran code):
- License of L-BFGS-B (Fortran code):
+ The version included here (in Fortran code) is 2.1 (released in 1997).
+ It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal
+ <nocedal@ece.nwu.edu>. It carries the following condition for use:
- The version included here (in fortran code) is 2.1 (released in
- 1997). It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal
- <nocedal@ece.nwu.edu>. It carries the following condition for use:
+ This software is freely available, but we expect that all publications
+ describing work using this software, or all commercial products using it,
+ quote at least one of the references given below.
- This software is freely available, but we expect that all
- publications describing work using this software, or all
- commercial products using it, quote at least one of the references
- given below.
-
- References
- * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
- Constrained Optimization, (1995), SIAM Journal on Scientific and
- Statistical Computing , 16, 5, pp. 1190-1208.
- * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
- FORTRAN routines for large scale bound constrained optimization (1997),
- ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560.
+ References
+ ----------
+ * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+ Constrained Optimization, (1995), SIAM Journal on Scientific and
+ Statistical Computing, 16, 5, pp. 1190-1208.
+ * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+ FORTRAN routines for large scale bound constrained optimization (1997),
+ ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550-560.
"""
n = len(x0)
19 scipy/optimize/minpack.py
@@ -168,7 +168,8 @@ def fsolve(func, x0, args=(), fprime=None, full_output=0,
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
- """Minimize the sum of squares of a set of equations.
+ """
+ Minimize the sum of squares of a set of equations.
::
@@ -226,7 +227,7 @@ def leastsq(func, x0, args=(), Dfun=None, full_output=0,
residual standard deviation to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
- a dictionary of optional outputs with the keys::
+ a dictionary of optional outputs with the keys::
- 'nfev' : the number of function calls
- 'fvec' : the function evaluated at the output
@@ -242,6 +243,7 @@ def leastsq(func, x0, args=(), Dfun=None, full_output=0,
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
- 'qtf' : the vector (transpose(q) * fvec).
+
mesg : str
A string message giving information about the cause of failure.
ier : int
@@ -253,6 +255,19 @@ def leastsq(func, x0, args=(), Dfun=None, full_output=0,
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
+ cov_x is a Jacobian approximation to the Hessian of the least squares
+ objective function.
+ This approximation assumes that the objective function is based on the
+ difference between some observed target data (ydata) and a (non-linear)
+ function of the parameters `f(xdata, params)` ::
+
+ func(params) = ydata - f(xdata, params)
+
+ so that the objective function is ::
+
+ min sum((ydata - f(xdata, params))**2, axis=0)
+ params
+
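+ For example, fitting a straight line ``f(xdata, params) = a*xdata + b``
+ in this form (illustrative; the recovered parameters in the comment are
+ approximate)::
+
+ >>> from scipy.optimize import leastsq
+ >>> import numpy as np
+ >>> xdata = np.array([0.0, 1.0, 2.0, 3.0])
+ >>> ydata = 2.0 * xdata + 0.5
+ >>> def residuals(params):
+ ... a, b = params
+ ... return ydata - (a * xdata + b)
+ >>> params = leastsq(residuals, [1.0, 0.0])[0]
+ >>> # params is approximately array([ 2. , 0.5])
+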
"""
x0 = array(x0, ndmin=1)
n = len(x0)
16 scipy/optimize/optimize.py
@@ -178,7 +178,8 @@ def function_wrapper(x):
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None):
- """Minimize a function using the downhill simplex algorithm.
+ """
+ Minimize a function using the downhill simplex algorithm.
Parameters
----------
@@ -208,7 +209,7 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
allvecs : list
Solution at each iteration.
- Other Parameters
+ Other parameters
----------------
xtol : float
Relative error in xopt acceptable for convergence.
@@ -219,16 +220,16 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
maxfun : number
Maximum number of function evaluations to make.
full_output : bool
- Set to True if fval and warnflag outputs are desired.
+ Set to True if fopt and warnflag outputs are desired.
disp : bool
Set to True to print convergence messages.
retall : bool
Set to True to return list of solutions at each iteration.
Notes
-----
- Uses a Nelder-Mead simplex algorithm to find the minimum of
- a function of one or more variables.
+ Uses a Nelder-Mead simplex algorithm to find the minimum of a function of
+ one or more variables.
"""
fcalls, func = wrap_function(func, args)
@@ -1422,7 +1423,8 @@ def myfunc(alpha):
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
- """Minimize a function using modified Powell's method.
+ """
+ Minimize a function using modified Powell's method.
Parameters
----------
@@ -1431,7 +1433,7 @@ def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
x0 : ndarray
Initial guess.
args : tuple
- Eextra arguments passed to func.
+ Extra arguments passed to func.
callback : callable
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
6 scipy/optimize/slsqp.py
@@ -66,22 +66,22 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
----------
func : callable f(x,*args)
Objective function.
- x0 : ndarray of float
+ x0 : 1-D ndarray of float
Initial guess for the independent variable(s).
eqcons : list
A list of functions of length n such that
eqcons[j](x0,*args) == 0.0 in a successfully optimized
problem.
f_eqcons : callable f(x,*args)
- Returns an array in which each element must equal 0.0 in a
+ Returns a 1-D array in which each element must equal 0.0 in a
successfully optimized problem. If f_eqcons is specified,
eqcons is ignored.
ieqcons : list
A list of functions of length n such that
ieqcons[j](x0,*args) >= 0.0 in a successfully optimized
problem.
f_ieqcons : callable f(x0,*args)
- Returns an array in which each element must be greater or
+ Returns a 1-D ndarray in which each element must be greater than or
equal to 0.0 in a successfully optimized problem. If
f_ieqcons is specified, ieqcons is ignored.
bounds : list
11 scipy/optimize/tnc.py
@@ -83,21 +83,22 @@ def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
rescale=-1, disp=None):
- """Minimize a function with variables subject to bounds, using
+ """
+ Minimize a function with variables subject to bounds, using
gradient information.
Parameters
----------
- func : callable func(x, *args)
+ func : callable ``func(x, *args)``
Function to minimize. Should return f and g, where f is
the value of the function and g its gradient (a list of
floats). If the function returns None, the minimization
is aborted.
x0 : list of floats
Initial estimate of minimum.
- fprime : callable fprime(x, *args)
+ fprime : callable ``fprime(x, *args)``
Gradient of func. If None, then func must return the
- function value and the gradient (f,g = func(x, *args)).
+ function value and the gradient (``f,g = func(x, *args)``).
args : tuple
Arguments to pass to function.
approx_grad : bool
@@ -164,7 +165,7 @@ def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
The solution.
nfeval : int
The number of function evaluations.
- rc :
+ rc : int
Return code as defined in the RCSTRINGS dict.
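+
+ Examples
+ --------
+ A sketch where `func` returns both the function value and its gradient,
+ with message output suppressed (illustrative; the result in the comment
+ is approximate)::
+
+ >>> from scipy.optimize import fmin_tnc
+ >>> def f_and_g(x):
+ ... return x[0]**2, [2.0 * x[0]]
+ >>> x, nfeval, rc = fmin_tnc(f_and_g, [3.0], bounds=[(1.0, 10.0)],
+ ... messages=0)
+ >>> # the lower bound is active, so x is approximately array([ 1.])
+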
"""
