DOC: merge doc wiki edits. Thanks to everyone who contributed.

commit b853c347d46a5095870c62db83962288ec1ef4a7 1 parent 0199055
@rgommers authored
178 doc/source/tutorial/optimize.rst
@@ -378,24 +378,26 @@ The following script shows examples for how constraints can be specified.
"""
This script tests fmin_slsqp using Example 14.4 from Numerical Methods for
Engineers by Steven Chapra and Raymond Canale. This example maximizes the
- function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, which has a maximum at x=2,y=1.
+ function f(x) = 2*x0*x1 + 2*x0 - x0**2 - 2*x1**2, which has a maximum
+ at x0=2, x1=1.
"""
from scipy.optimize import fmin_slsqp
- from numpy import array, asfarray, finfo,ones, sqrt, zeros
+ from numpy import array
-
- def testfunc(d, *args):
+ def testfunc(x, *args):
"""
Parameters
----------
- d : list
+ x : list
      A list of two elements, where x[0] and x[1] represent the two
      variables in the equation above.
- sign : float
- A multiplier for f. Since we want to optimize it, and the scipy
- optimizers can only minimize functions, we need to multiply it by
- -1 to achieve the desired solution.
+ args : tuple
+ First element of args is a multiplier for f.
+ Since the objective function should be maximized, and the scipy
+ optimizers can only minimize functions, it is necessary to
+ multiply the objective function by -1 to achieve the desired
+ solution.
Returns
-------
res : float
@@ -406,134 +408,104 @@ The following script shows examples for how constraints can be specified.
sign = args[0]
except:
sign = 1.0
- x = d[0]
- y = d[1]
- return sign*(2*x*y + 2*x - x**2 - 2*y**2)
+ return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
- def testfunc_deriv(d,*args):
+ def testfunc_deriv(x,*args):
""" This is the derivative of testfunc, returning a numpy array
- representing df/dx and df/dy
-
- """
+ representing df/dx0 and df/dx1 """
try:
sign = args[0]
except:
sign = 1.0
- x = d[0]
- y = d[1]
- dfdx = sign*(-2*x + 2*y + 2)
- dfdy = sign*(2*x - 4*y)
- return array([ dfdx, dfdy ],float)
+ dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
+ dfdx1 = sign*(2*x[0] - 4*x[1])
+ return array([ dfdx0, dfdx1 ])
+ def test_eqcons(x,*args):
+ """ Lefthandside of the equality constraint """
+ return array([ x[0]**3-x[1] ])
- from time import time
+ def test_ieqcons(x,*args):
+ """ Lefthandside of inequality constraint """
+ return array([ x[1]-1 ])
+
+ def test_fprime_eqcons(x,*args):
+ """ First derivative of equality constraint """
+ return array([ 3.0*(x[0]**2.0), -1.0 ])
- print '\n\n'
+ def test_fprime_ieqcons(x,*args):
+ """ First derivative of inequality constraint """
+ return array([ 0.0, 1.0 ])
+
+ from time import time
- print "Unbounded optimization. Derivatives approximated."
+ print "Unbounded optimization."
+ print "Derivatives of objective function approximated."
t0 = time()
- x = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,), iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,), iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
+ print "Results", result, "\n\n"
- print "Unbounded optimization. Derivatives provided."
+ print "Unbounded optimization."
+ print "Derivatives of objective function provided."
t0 = time()
- x = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,), iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
+ iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
+ print "Results", result, "\n\n"
- print "Bound optimization. Derivatives approximated."
+ print "Bound optimization (equality constraints)."
+ print "Constraints implemented via lambda function."
+ print "Derivatives of objective function approximated."
+ print "Derivatives of constraints approximated."
t0 = time()
- x = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,),
- eqcons=[lambda x, y: x[0]-x[1] ], iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc, [-1.0,1.0], args=(-1.0,),
+ eqcons=[lambda x, args: x[0]-x[1] ], iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
+ print "Results", result, "\n\n"
- print "Bound optimization (equality constraints). Derivatives provided."
+ print "Bound optimization (equality constraints)."
+ print "Constraints implemented via lambda."
+ print "Derivatives of objective function provided."
+ print "Derivatives of constraints approximated."
t0 = time()
- x = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
- eqcons=[lambda x, y: x[0]-x[1] ], iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
+ eqcons=[lambda x, args: x[0]-x[1] ], iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
+ print "Results", result, "\n\n"
print "Bound optimization (equality and inequality constraints)."
- print "Derivatives provided."
-
+ print "Constraints implemented via lambda."
+ print "Derivatives of objective function provided."
+ print "Derivatives of constraints approximated."
t0 = time()
- x = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
- eqcons=[lambda x, y: x[0]-x[1] ],
- ieqcons=[lambda x, y: x[0]-.5], iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
+ eqcons=[lambda x, args: x[0]-x[1] ],
+ ieqcons=[lambda x, args: x[0]-.5], iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
-
-
- def test_eqcons(d,*args):
- try:
- sign = args[0]
- except:
- sign = 1.0
- x = d[0]
- y = d[1]
- return array([ x**3-y ])
-
-
- def test_ieqcons(d,*args):
- try:
- sign = args[0]
- except:
- sign = 1.0
- x = d[0]
- y = d[1]
- return array([ y-1 ])
+ print "Results", result, "\n\n"
print "Bound optimization (equality and inequality constraints)."
- print "Derivatives provided via functions."
+ print "Constraints implemented via function."
+ print "Derivatives of objective function provided."
+ print "Derivatives of constraints approximated."
t0 = time()
- x = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
+ result = fmin_slsqp(testfunc, [-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
f_eqcons=test_eqcons, f_ieqcons=test_ieqcons,
iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
-
-
- def test_fprime_eqcons(d,*args):
- try:
- sign = args[0]
- except:
- sign = 1.0
- x = d[0]
- y = d[1]
- return array([ 3.0*(x**2.0), -1.0 ])
-
-
- def test_fprime_ieqcons(d,*args):
- try:
- sign = args[0]
- except:
- sign = 1.0
- x = d[0]
- y = d[1]
- return array([ 0.0, 1.0 ])
+ print "Results", result, "\n\n"
print "Bound optimization (equality and inequality constraints)."
- print "Derivatives provided via functions."
- print "Constraint jacobians provided via functions"
+ print "Constraints implemented via function."
+ print "All derivatives provided."
t0 = time()
- x = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
- f_eqcons=test_eqcons, f_ieqcons=test_ieqcons,
- fprime_eqcons=test_fprime_eqcons,
- fprime_ieqcons=test_fprime_ieqcons, iprint=2, full_output=1)
+ result = fmin_slsqp(testfunc,[-1.0,1.0], fprime=testfunc_deriv, args=(-1.0,),
+ f_eqcons=test_eqcons, fprime_eqcons=test_fprime_eqcons,
+ f_ieqcons=test_ieqcons, fprime_ieqcons=test_fprime_ieqcons,
+ iprint=2, full_output=1)
print "Elapsed time:", 1000*(time()-t0), "ms"
- print "Results",x
- print "\n\n"
-
-
+ print "Results", result, "\n\n"
Scalar function minimizers
@@ -762,7 +734,7 @@ to the Laplace operator part: we know that in one dimension
.. math::
- \partial_x^2 \approx \frac{1}{h_x^2} \begin{pmatrix}
+ \partial_x^2 \approx \frac{1}{h_x^2} \begin{pmatrix}
-2 & 1 & 0 & 0 \cdots \\
1 & -2 & 1 & 0 \cdots \\
0 & 1 & -2 & 1 \cdots \\
72 scipy/cluster/hierarchy.py
@@ -1907,8 +1907,9 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
no_leaves=False, show_contracted=False,
link_color_func=None):
"""
- Plots the hiearchical clustering defined by the linkage Z as a
- dendrogram. The dendrogram illustrates how each cluster is
+ Plots the hierarchical clustering as a dendrogram.
+
+ The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
@@ -1930,19 +1931,17 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
large. Truncation is used to condense the dendrogram. There
are several modes:
- * None/'none': no truncation is performed (Default)
-
- * 'lastp': the last ``p`` non-singleton formed in the linkage
- are the only non-leaf nodes in the linkage; they correspond
- to to rows ``Z[n-p-2:end]`` in ``Z``. All other
- non-singleton clusters are contracted into leaf nodes.
-
- * 'mlab': This corresponds to MATLAB(TM) behavior. (not
- implemented yet)
+ * None/'none': no truncation is performed (Default)
+ * 'lastp': the last ``p`` non-singleton formed in the linkage
+ are the only non-leaf nodes in the linkage; they correspond
+ to rows ``Z[n-p-2:end]`` in ``Z``. All other
+ non-singleton clusters are contracted into leaf nodes.
+ * 'mlab': This corresponds to MATLAB(TM) behavior. (not
+ implemented yet)
+ * 'level'/'mtica': no more than ``p`` levels of the
+ dendrogram tree are displayed. This corresponds to
+ Mathematica(TM) behavior.
- * 'level'/'mtica': no more than ``p`` levels of the
- dendrogram tree are displayed. This corresponds to
- Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
@@ -1961,19 +1960,16 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
- of the following strings
+ of the following strings:
- * 'top': plots the root at the top, and plot descendent
+ * 'top': plots the root at the top, and plots descendent
links going downwards. (default).
-
- * 'bottom': plots the root at the bottom, and plot descendent
+ * 'bottom': plots the root at the bottom, and plots descendent
links going upwards.
-
- * 'left': plots the root at the left, and plot descendent
+ * 'left': plots the root at the left, and plots descendent
links going right.
-
- * 'right': plots the root at the right, and plot descendent
- links going left.
+ * 'right': plots the root at the right, and plots descendent
+ links going left.
labels : ndarray, optional
By default ``labels`` is ``None`` so the index of the
@@ -1987,13 +1983,11 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
- * False: nothing is done.
-
- * 'ascending'/True: the child with the minimum number of
- original objects in its cluster is plotted first.
-
- * 'descendent': the child with the maximum number of
- original objects in its cluster is plotted first.
+ * False: nothing is done.
+ * 'ascending'/True: the child with the minimum number of
+ original objects in its cluster is plotted first.
+ * 'descendent': the child with the maximum number of
+ original objects in its cluster is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
@@ -2003,13 +1997,11 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
- * False: nothing is done.
-
- * 'ascending'/True: the child with the minimum distance
- between its direct descendents is plotted first.
-
- * 'descending': the child with the maximum distance
- between its direct descendents is plotted first.
+ * False: nothing is done.
+ * 'ascending'/True: the child with the minimum distance
+ between its direct descendents is plotted first.
+ * 'descending': the child with the maximum distance
+ between its direct descendents is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
@@ -2040,7 +2032,7 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
leaf.
Indices :math:`k < n` correspond to original observations
- while indices :math:`k \geq n` correspond to non-singleton
+ while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
@@ -2070,9 +2062,9 @@ def llf(id):
function is expected to return the color to paint the link,
encoded as a matplotlib color string code.
- For example::
+ For example:
- dendrogram(Z, link_color_func=lambda k: colors[k])
+ >>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
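A minimal sketch of the truncation behaviour described above (the data and method choice are illustrative; no_plot=True keeps it matplotlib-free):

    import numpy as np
    from scipy.cluster.hierarchy import linkage, dendrogram

    np.random.seed(0)
    X = np.random.rand(50, 4)            # 50 observations, 4 features
    Z = linkage(X, method='single')      # linkage matrix for the dendrogram

    # 'lastp' keeps only the last p=5 merged clusters as non-leaf nodes.
    d = dendrogram(Z, p=5, truncate_mode='lastp', no_plot=True)
    print(d['ivl'])    # leaf labels; contracted clusters appear as '(n)'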
52 scipy/cluster/vq.py
@@ -89,7 +89,8 @@ class ClusterError(Exception):
pass
def whiten(obs):
- """ Normalize a group of observations on a per feature basis.
+ """
+ Normalize a group of observations on a per feature basis.
Before running k-means, it is beneficial to rescale each feature
dimension of the observation set with whitening. Each feature is
@@ -101,15 +102,12 @@ def whiten(obs):
obs : ndarray
Each row of the array is an observation. The
columns are the features seen during each observation.
- ::
-
- # f0 f1 f2
- obs = [[ 1., 1., 1.], #o0
- [ 2., 2., 2.], #o1
- [ 3., 3., 3.], #o2
- [ 4., 4., 4.]]) #o3
- XXX perhaps should have an axis variable here.
+ >>> # f0 f1 f2
+ >>> obs = [[ 1., 1., 1.], #o0
+ ... [ 2., 2., 2.], #o1
+ ... [ 3., 3., 3.], #o2
+ ... [ 4., 4., 4.]]) #o3
Returns
-------
@@ -134,14 +132,15 @@ def whiten(obs):
return obs / std_dev
def vq(obs, code_book):
- """ Vector Quantization: assign codes from a code book to observations.
+ """
+ Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
- observation vector in the M by N obs array is compared with the
+ observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
- The features in obs should have unit variance, which can be
+ The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
@@ -149,20 +148,19 @@ def vq(obs, code_book):
Parameters
----------
obs : ndarray
- Each row of the NxM array is an observation. The columns are the
- "features" seen during each observation. The features must be
+ Each row of the 'N' x 'M' array is an observation. The columns are
+ the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
- ::
-
- # f0 f1 f2 f3
- code_book = [[ 1., 2., 3., 4.], #c0
- [ 1., 2., 3., 4.], #c1
- [ 1., 2., 3., 4.]]) #c2
+ >>> # f0 f1 f2 f3
+ >>> code_book = [
+ ... [ 1., 2., 3., 4.], #c0
+ ... [ 1., 2., 3., 4.], #c1
+ ... [ 1., 2., 3., 4.]]) #c2
Returns
-------
@@ -599,8 +597,8 @@ def _missing_raise():
def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random',
missing = 'warn'):
- """Classify a set of observations into k clusters using the k-means
- algorithm.
+ """
+ Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
@@ -609,11 +607,11 @@ def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random',
Parameters
----------
data : ndarray
- A M by N array of M observations in N dimensions or a length
- M array of M one-dimensional observations.
+ A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
+ 'M' array of 'M' one-dimensional observations.
k : int or ndarray
The number of clusters to form as well as the number of
- centroids to generate. If minit initialization string is
+ centroids to generate. If `minit` initialization string is
'matrix', or if a ndarray is given instead, it is
interpreted as initial cluster to use instead.
iter : int
@@ -621,7 +619,7 @@ def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random',
that this differs in meaning from the iters parameter to
the kmeans function.
thresh : float
- (not used yet).
+ (not used yet)
minit : string
Method for initialization. Available methods are 'random',
'points', 'uniform', and 'matrix':
@@ -641,7 +639,7 @@ def kmeans2(data, k, iter = 10, thresh = 1e-5, minit = 'random',
Returns
-------
centroid : ndarray
- A k by N array of centroids found at the last iteration of
+ A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
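Putting the functions documented above together, a minimal sketch of the whiten/kmeans/vq workflow (the observations are illustrative):

    import numpy as np
    from scipy.cluster.vq import whiten, kmeans, vq

    obs = np.array([[1.9, 2.0], [1.7, 0.4], [1.5, -0.1],
                    [10.2, 9.8], [10.1, 10.3], [9.7, 10.1]])
    w = whiten(obs)                       # unit variance per feature
    code_book, distortion = kmeans(w, 2)  # two centroids
    codes, dists = vq(w, code_book)       # nearest centroid per observation
    print(codes)                          # e.g. [0 0 0 1 1 1] (labels may swap)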
8 scipy/constants/codata.py
@@ -912,7 +912,7 @@ def precision(key) :
def find(sub=None, disp=False):
"""
- Return list of codata.physical_constant keys containing a given string
+ Return list of codata.physical_constant keys containing a given string.
Parameters
----------
@@ -924,9 +924,9 @@ def find(sub=None, disp=False):
Returns
-------
- keys : None or list
- If `disp` is False, the list of keys is returned. Otherwise, None
- is returned.
+ keys : list or None
+ If `disp` is False, the list of keys is returned.
+ Otherwise, None is returned.
See Also
--------
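A short sketch of find together with the physical_constants dict it indexes into (the exact key list depends on the CODATA release shipped with the installed scipy):

    from scipy.constants import find, physical_constants

    keys = find('Planck')                 # all keys containing 'Planck'
    value, unit, uncertainty = physical_constants['Planck constant']
    print(len(keys), value, unit)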
89 scipy/fftpack/basic.py
@@ -153,26 +153,31 @@ def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
def fft(x, n=None, axis=-1, overwrite_x=0):
"""
- Return discrete Fourier transform of arbitrary type sequence x.
+ Return discrete Fourier transform of real or complex sequence.
+
+ The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
+
+ ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.
Parameters
----------
- x : array-like
- array to fourier transform.
+ x : array_like
+ Array to Fourier transform.
n : int, optional
- Length of the Fourier transform. If n<x.shape[axis],
- x is truncated. If n>x.shape[axis], x is zero-padded.
- (Default n=x.shape[axis]).
+ Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+ default results in ``n = x.shape[axis]``.
axis : int, optional
- Axis along which the fft's are computed. (default=-1)
+ Axis along which the fft's are computed; the default is over the
+ last axis (i.e., ``axis=-1``).
overwrite_x : bool, optional
- If True the contents of x can be destroyed. (default=False)
+ If True the contents of `x` can be destroyed; the default is False.
Returns
-------
z : complex ndarray
with the elements:
- [y(0),y(1),..,y(n/2-1),y(-n/2),...,y(-1)] if n is even
+ [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even
[y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd
where
y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
@@ -191,12 +196,16 @@ def fft(x, n=None, axis=-1, overwrite_x=0):
terms, in order of decreasingly negative frequency. So for an 8-point
transform, the frequencies of the result are [ 0, 1, 2, 3, 4, -3, -2, -1].
+ For n even, A[n/2] contains the sum of the positive and negative-frequency
+ terms. For n even and x real, A[n/2] will always be real.
+
This is most efficient for n a power of two.
Examples
--------
+ >>> from scipy.fftpack import fft, ifft
>>> x = np.arange(5)
- >>> np.all(np.abs(x-fft(ifft(x))<1.e-15) #within numerical accuracy.
+ >>> np.allclose(fft(ifft(x)), x, atol=1e-15) #within numerical accuracy.
True
"""
@@ -423,35 +432,49 @@ def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
def fftn(x, shape=None, axes=None, overwrite_x=0):
- """ fftn(x, shape=None, axes=None, overwrite_x=0) -> y
-
- Return multi-dimensional discrete Fourier transform of arbitrary
- type sequence x.
+ """
+ Return multi-dimensional discrete Fourier transform of x.
- The returned array contains
+ The returned array contains::
y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
where d = len(x.shape) and n = x.shape.
- Note that y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate().
-
- Optional input:
- shape
- Defines the shape of the Fourier transform. If shape is not
- specified then shape=take(x.shape,axes,axis=0).
- If shape[i]>x.shape[i] then the i-th dimension is padded with
- zeros. If shape[i]<x.shape[i], then the i-th dimension is
- truncated to desired length shape[i].
- axes
- The transform is applied along the given axes of the input
- array (or the newly constructed array if shape argument was
- used).
- overwrite_x
- If set to true, the contents of x can be destroyed.
-
- Notes:
- y == fftn(ifftn(y)) within numerical accuracy.
+ Note that ``y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate()``.
+
+ Parameters
+ ----------
+ x : array_like
+ The (n-dimensional) array to transform.
+ shape : tuple of ints, optional
+ The shape of the result. If both `shape` and `axes` (see below) are
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+ not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
+ If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
+ If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
+ length ``shape[i]``.
+ axes : array_like of ints, optional
+ The axes of `x` (`y` if `shape` is not None) along which the
+ transform is applied.
+ overwrite_x : bool, optional
+ If True, the contents of `x` can be destroyed. Default is False.
+
+ Returns
+ -------
+ y : complex-valued n-dimensional numpy array
+ The (n-dimensional) DFT of the input array.
+
+ See Also
+ --------
+ ifftn
+
+ Examples
+ --------
+ >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
+ >>> np.allclose(y, fftn(ifftn(y)))
+ True
+
"""
return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1)
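A small sketch of the frequency ordering and the Nyquist term discussed in the fft notes above:

    import numpy as np
    from scipy.fftpack import fft

    x = np.arange(8.0)
    y = fft(x)
    # For n=8 the output order is [0, 1, 2, 3, 4, -3, -2, -1] in frequency;
    # y[4] holds the combined positive/negative Nyquist term, real for real x.
    print(np.allclose(y[4].imag, 0.0))    # True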
8 scipy/fftpack/realtransforms.py
@@ -147,13 +147,13 @@ def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=0):
Notes
-----
For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
- matlab ``idct(x)``.
+ MATLAB ``idct(x)``.
'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.
- IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type 3,
- and IDCT of type 3 is the DCT of type 2. For the definition of these types,
- see `dct`.
+ IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type
+ 3, and IDCT of type 3 is the DCT of type 2. For the definition of these
+ types, see `dct`.
"""
if type == 1 and norm is not None:
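A quick sketch of the type-2/type-3 inverse pair noted above, using the MATLAB-compatible 'ortho' normalization:

    import numpy as np
    from scipy.fftpack import dct, idct

    x = np.array([1.0, 2.0, 3.0, 4.0])
    # idct (type 2, the default) exactly inverts dct with norm='ortho'.
    print(np.allclose(idct(dct(x, norm='ortho'), norm='ortho'), x))   # True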
2  scipy/integrate/quadpack.py
@@ -222,7 +222,7 @@ def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
>>> from scipy import integrate
>>> x2 = lambda x: x**2
- >>> integrate.quad(x,0.,4.)
+ >>> integrate.quad(x2,0.,4.)
(21.333333333333332, 2.3684757858670003e-13)
>> print 4.**3/3
21.3333333333
52 scipy/io/matlab/mio.py
@@ -119,12 +119,35 @@ def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
Parameters
----------
- %(file_arg)s
+ file_name : str
+ Name of the mat file (.mat extension not needed if
+ appendmat==True). Can also pass an open file-like object.
m_dict : dict, optional
Dictionary in which to insert matfile variables.
- %(append_arg)s
- %(load_args)s
- %(struct_arg)s
+ appendmat : bool, optional
+ True to append the .mat extension to the end of the given
+ filename, if not already present.
+ byte_order : str or None, optional
+ None by default, implying byte order guessed from mat
+ file. Otherwise can be one of ('native', '=', 'little', '<',
+ 'BIG', '>').
+ mat_dtype : bool, optional
+ If True, return arrays in same dtype as would be loaded into
+ MATLAB (instead of the dtype with which they are saved).
+ squeeze_me : bool, optional
+ Whether to squeeze unit matrix dimensions or not.
+ chars_as_strings : bool, optional
+ Whether to convert char arrays to string arrays.
+ matlab_compatible : bool, optional
+ Returns matrices as would be loaded by MATLAB (implies
+ squeeze_me=False, chars_as_strings=False, mat_dtype=True,
+ struct_as_record=True).
+ struct_as_record : bool, optional
+ Whether to load MATLAB structs as numpy record arrays, or as
+ old-style numpy arrays with dtype=object. Setting this flag to
+ False replicates the behavior of scipy version 0.7.x (returning
+ numpy object arrays). The default setting is True, because it
+ allows easier round-trip load and save of MATLAB files.
variable_names : None or sequence
If None (the default) - read all variables in file. Otherwise
`variable_names` should be a sequence of strings, giving names of the
@@ -177,15 +200,26 @@ def savemat(file_name, mdict,
Name of the .mat file (.mat extension not needed if ``appendmat ==
True``).
Can also pass open file_like object.
- m_dict : dict
+ mdict : dict
Dictionary from which to save matfile variables.
- %(append_arg)s
+ appendmat : bool, optional
+ True (the default) to append the .mat extension to the end of the
+ given filename, if not already present.
format : {'5', '4'}, string, optional
'5' (the default) for MATLAB 5 and up (to 7.2),
'4' for MATLAB 4 .mat files
- %(long_fields)s
- %(do_compression)s
- %(oned_as)s
+ long_field_names : bool, optional
+ False (the default) - maximum field name length in a structure is
+ 31 characters which is the documented maximum length.
+ True - maximum field name length in a structure is 63 characters
+ which works for MATLAB 7.6+
+ do_compression : bool, optional
+ Whether or not to compress matrices on write. Default is False.
+ oned_as : {'column', 'row', None}, optional
+ If 'column', write 1-D numpy arrays as column vectors.
+ If 'row', write 1-D numpy arrays as row vectors.
+ If None (the default), the behavior depends on the value of `format`
+ (see Notes below).
See also
--------
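A minimal round-trip sketch of the keywords documented above (the /tmp path is illustrative):

    import numpy as np
    from scipy.io import savemat, loadmat

    savemat('/tmp/example.mat', {'a': np.arange(4)}, oned_as='row')
    d = loadmat('/tmp/example.mat', squeeze_me=True)
    print(d['a'])        # [0 1 2 3]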
13 scipy/io/netcdf.py
@@ -745,10 +745,23 @@ def __setattr__(self, attr, value):
self.__dict__[attr] = value
def isrec(self):
+ """Returns whether the variable has a record dimension or not.
+
+ A record dimension is a dimension along which additional data could be
+ easily appended in the netcdf data structure without much rewriting of
+ the data file. This attribute is a read-only property of the
+ `netcdf_variable`.
+
+ """
return self.data.shape and not self._shape[0]
isrec = property(isrec)
def shape(self):
+ """Returns the shape tuple of the data variable.
+
+ This is a read-only attribute and cannot be modified in the
+ same manner as other numpy arrays.
+ """
return self.data.shape
shape = property(shape)
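A sketch of a record dimension in practice (assuming scipy.io.netcdf's createDimension/createVariable API; the file path is illustrative):

    from scipy.io import netcdf

    f = netcdf.netcdf_file('/tmp/simple.nc', 'w')
    f.createDimension('time', None)           # length None => record dimension
    v = f.createVariable('time', 'i', ('time',))
    v[3] = 42                                 # record variables grow on write
    print(v.isrec, v.shape)                   # True (4,)
    f.close()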
5 scipy/linalg/basic.py
@@ -131,9 +131,10 @@ def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
def solve_banded((l, u), ab, b, overwrite_ab=False, overwrite_b=False,
debug=False):
- """Solve the equation a x = b for x, assuming a is banded matrix.
+ """
+ Solve the equation a x = b for x, assuming a is a banded matrix.
- The matrix a is stored in ab using the matrix diagonal orded form::
+ The matrix a is stored in ab using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
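For a concrete reading of that formula, a sketch with a 4x4 tridiagonal matrix (l = u = 1), where the rows of ab hold the super-, main and subdiagonals:

    import numpy as np
    from scipy.linalg import solve_banded

    ab = np.array([[0., 1., 1., 1.],    # superdiagonal, ab[0, 0] unused
                   [2., 2., 2., 2.],    # main diagonal
                   [1., 1., 1., 0.]])   # subdiagonal, ab[2, -1] unused
    b = np.ones(4)
    x = solve_banded((1, 1), ab, b)

    a = np.diag([2.]*4) + np.diag([1.]*3, 1) + np.diag([1.]*3, -1)
    print(np.allclose(a.dot(x), b))     # True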
64 scipy/misc/pilutil.py
@@ -84,8 +84,8 @@ def imread(name,flatten=0):
Returns
-------
- : nd_array
- The array obtained by reading image.
+ imread : ndarray
+ The array obtained by reading image from file `name`.
Notes
-----
@@ -99,22 +99,34 @@ def imread(name,flatten=0):
def imsave(name, arr):
"""
- Save an array to an image file.
+ Save an array as an image.
Parameters
----------
- im : PIL image
- Input image.
+ filename : str
+ Output filename.
+ image : ndarray, MxN or MxNx3 or MxNx4
+ Array containing image values. If the shape is ``MxN``, the array
+ represents a grey-level image. Shape ``MxNx3`` stores the red, green
+ and blue bands along the last dimension. An alpha layer may be
+ included, specified as the last colour band of an ``MxNx4`` array.
- flatten : bool
- If true, convert the output to grey-scale.
+ Examples
+ --------
+ Construct an array of gradient intensity values and save to file:
- Returns
- -------
- img_array : ndarray
- The different colour bands/channels are stored in the
- third dimension, such that a grey-image is MxN, an
- RGB-image MxNx3 and an RGBA-image MxNx4.
+ >>> x = np.zeros((255, 255))
+ >>> x = np.zeros((255, 255), dtype=np.uint8)
+ >>> x[:] = np.arange(255)
+ >>> imsave('/tmp/gradient.png', x)
+
+ Construct an array with three colour bands (R, G, B) and store to file:
+
+ >>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)
+ >>> rgb[..., 0] = np.arange(255)
+ >>> rgb[..., 1] = 55
+ >>> rgb[..., 2] = 255 - np.arange(255)
+ >>> imsave('/tmp/rgb_gradient.png', rgb)
"""
im = toimage(arr)
@@ -134,7 +146,7 @@ def fromimage(im, flatten=0):
Returns
-------
- img_array : ndarray
+ fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
@@ -270,17 +282,17 @@ def imrotate(arr,angle,interp='bilinear'):
Returns
-------
- : nd_array
+ imrotate : ndarray
The rotated array of image.
Notes
-----
Interpolation methods can be:
- * 'nearest' : for nearest neighbor
- * 'bilinear' : for bilinear
- * 'cubic' : cubic
- * 'bicubic' : for bicubic
+ * 'nearest' : for nearest neighbor
+ * 'bilinear' : for bilinear
+ * 'cubic' : for cubic
+ * 'bicubic' : for bicubic
"""
arr = asarray(arr)
@@ -344,16 +356,16 @@ def imresize(arr, size, interp='bilinear', mode=None):
* float - Fraction of current size.
* tuple - Size of the output image.
- interp : string
- interpolation to use for re-sizing ('nearest', 'bilinear', 'bicubic' or 'cubic')
+ interp : str
+ Interpolation to use for re-sizing ('nearest', 'bilinear', 'bicubic'
+ or 'cubic').
- mode :
- mode is the PIL image mode ('P', 'L', etc.)
+ mode : str
+ The PIL image mode ('P', 'L', etc.).
Returns
-------
-
- : nd_array
+ imresize : ndarray
The resized array of image.
"""
@@ -385,7 +397,7 @@ def imfilter(arr,ftype):
Returns
-------
- res : nd_array
+ imfilter : ndarray
The array with filter applied.
Raises
3  scipy/ndimage/filters.py
@@ -602,7 +602,7 @@ def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0,
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
- >>> b = np.array([[1,1,1],[1,1,0],[1,0,0]])
+ >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
@@ -626,6 +626,7 @@ def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0,
[1, 0, 0],
[0, 0, 0]])
>>> k = np.array([[0,1,0],[0,1,0],[0,1,0]])
+ >>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
10 scipy/ndimage/interpolation.py
@@ -256,25 +256,25 @@ def map_coordinates(input, coordinates, output=None, order=3,
Examples
--------
- >>> import scipy.ndimage
+ >>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
- >>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
+ >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
- >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
- >>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
+ >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
- >>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
+ >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
7 scipy/odr/models.py
@@ -80,7 +80,8 @@ def _exp_est(data):
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'})
def polynomial(order):
- """ Factory function for a general polynomial model.
+ """
+ Factory function for a general polynomial model.
Parameters
----------
@@ -93,7 +94,9 @@ def polynomial(order):
Returns
-------
- model : Model instance
+ polynomial : Model instance
+ Model instance.
+
"""
powers = np.asarray(order)
205 scipy/odr/odrpack.py
@@ -219,77 +219,81 @@ def _report_error(info):
class Data(object):
- """ The Data class stores the data to fit.
+ """
+ scipy.odr.Data(x, y=None, we=None, wd=None, fix=None, meta={})
- Each argument is attached to the member of the instance of the same name.
- The structures of x and y are described in the Model class docstring. If
- y is an integer, then the Data instance can only be used to fit with
- implicit models where the dimensionality of the response is equal to the
- specified value of y. The structures of wd and we are described below. meta
- is an freeform dictionary for application-specific use.
-
- we weights the effect a deviation in the response variable has on the fit.
- wd weights the effect a deviation in the input variable has on the fit. To
- handle multidimensional inputs and responses easily, the structure of these
- arguments has the n'th dimensional axis first. These arguments heavily use
- the structured arguments feature of ODRPACK to conveniently and flexibly
- support all options. See the ODRPACK User's Guide for a full explanation of
- how these weights are used in the algorithm. Basically, a higher value of
- the weight for a particular data point makes a deviation at that point more
- detrimental to the fit.
+ The Data class stores the data to fit.
- we -- if we is a scalar, then that value is used for all data points (and
+ Parameters
+ ----------
+ x : array_like
+ Input data for regression.
+ y : array_like, optional
+ Input data for regression.
+ we : array_like, optional
+ If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
-
- If we is a rank-1 array of length q (the dimensionality of the response
- variable), then this vector is the diagonal of the covariant weighting
- matrix for all data points.
-
- If we is a rank-1 array of length n (the number of data points), then
+ If `we` is a rank-1 array of length q (the dimensionality of the
+ response variable), then this vector is the diagonal of the covariant
+ weighting matrix for all data points.
+ If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
-
- If we is a rank-2 array of shape (q, q), then this is the full covariant
- weighting matrix broadcast to each observation.
-
- If we is a rank-2 array of shape (q, n), then we[:,i] is the diagonal of
- the covariant weighting matrix for the i'th observation.
-
- If we is a rank-3 array of shape (q, q, n), then we[:,:,i] is the full
- specification of the covariant weighting matrix for each observation.
-
+ If `we` is a rank-2 array of shape (q, q), then this is the full
+ covariant weighting matrix broadcast to each observation.
+ If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
+ diagonal of the covariant weighting matrix for the i'th observation.
+ If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
+ full specification of the covariant weighting matrix for each
+ observation.
If the fit is implicit, then only a positive scalar value is used.
-
- wd -- if wd is a scalar, then that value is used for all data points
- (and all dimensions of the input variable). If wd = 0, then the
+ wd : array_like, optional
+ If `wd` is a scalar, then that value is used for all data points
+ (and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
-
- If wd is a rank-1 array of length m (the dimensionality of the input
+ If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
-
- If wd is a rank-1 array of length n (the number of data points), then
+ If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th input variable observation
(single-dimensional only).
+ If `wd` is a rank-2 array of shape (m, m), then this is the full
+ covariant weighting matrix broadcast to each observation.
+ If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
+ diagonal of the covariant weighting matrix for the i'th observation.
+ If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
+ full specification of the covariant weighting matrix for each
+ observation.
+ fix : array_like of ints, optional
+ The `fix` argument is the same as ifixx in the class ODR. It is an
+ array of integers with the same shape as data.x that determines which
+ input observations are treated as fixed. One can use a sequence of
+ length m (the dimensionality of the input observations) to fix some
+ dimensions for all observations. A value of 0 fixes the observation,
+ a value > 0 makes it free.
+ meta : dict, optional
+ Freeform dictionary for metadata.
- If wd is a rank-2 array of shape (m, m), then this is the full covariant
- weighting matrix broadcast to each observation.
-
- If wd is a rank-2 array of shape (m, n), then wd[:,i] is the diagonal of
- the covariant weighting matrix for the i'th observation.
-
- If wd is a rank-3 array of shape (m, m, n), then wd[:,:,i] is the full
- specification of the covariant weighting matrix for each observation.
-
- fix -- fix is the same as ifixx in the class ODR. It is an array of integers
- with the same shape as data.x that determines which input observations
- are treated as fixed. One can use a sequence of length m (the
- dimensionality of the input observations) to fix some dimensions for all
- observations. A value of 0 fixes the observation, a value > 0 makes it
- free.
+ Notes
+ -----
+ Each argument is attached to the member of the instance of the same name.
+ The structures of `x` and `y` are described in the Model class docstring.
+ If `y` is an integer, then the Data instance can only be used to fit with
+ implicit models where the dimensionality of the response is equal to the
+ specified value of `y`.
+
+ The `we` argument weights the effect a deviation in the response variable
+ has on the fit. The `wd` argument weights the effect a deviation in the
+ input variable has on the fit. To handle multidimensional inputs and
+ responses easily, the structure of these arguments has the n'th
+ dimensional axis first. These arguments heavily use the structured
+ arguments feature of ODRPACK to conveniently and flexibly support all
+ options. See the ODRPACK User's Guide for a full explanation of how these
+ weights are used in the algorithm. Basically, a higher value of the weight
+ for a particular data point makes a deviation at that point more
+ detrimental to the fit.
- meta -- optional, freeform dictionary for metadata
"""
def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}):
@@ -437,36 +441,41 @@ class Model(object):
can provide a function that will provide reasonable starting values
for the fit parameters possibly given the set of data.
- The initialization method stores these into members of the same name.
-
- fcn -- fit function
+ Parameters
+ ----------
+ fcn : function
fcn(beta, x) --> y
+ fjacb : function
+ Jacobian of fcn wrt the fit parameters beta.
- fjacb -- Jacobian of fcn wrt the fit parameters beta
fjacb(beta, x) --> @f_i(x,B)/@B_j
+ fjacd : function
+ Jacobian of fcn wrt the (possibly multidimensional) input
+ variable.
- fjacd -- Jacobian of fcn wrt the (possibly multidimensional) input
- variable
fjacd(beta, x) --> @f_i(x,B)/@x_j
+ extra_args : tuple, optional
+ If specified, `extra_args` should be a tuple of extra
+ arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
+ by `apply(fcn, (beta, x) + extra_args)`
+ estimate : array_like of rank-1
+ Provides estimates of the fit parameters from the data
- extra_args -- if specified, extra_args should be a tuple of extra
- arguments to pass to fcn, fjacb, and fjacd. Each will be called
- like the following: apply(fcn, (beta, x) + extra_args)
-
- estimate -- provide estimates of the fit parameters from the data:
estimate(data) --> estbeta
-
- implicit -- boolean variable which, if TRUE, specifies that the model
- is implicit; i.e fcn(beta, x) ~= 0 and there is no y data to fit
+ implicit : boolean
+ If TRUE, specifies that the model
+ is implicit; i.e. `fcn(beta, x)` ~= 0 and there is no y data to fit
against
-
- meta -- optional
+ meta : dict, optional
freeform dictionary of metadata for the model
- Note that the fcn, fjacb, and fjacd operate on NumPy arrays and return
- a NumPy array. The `estimate` object takes an instance of the Data class.
+ Notes
+ -----
+ Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
+ return a NumPy array. The `estimate` object takes an instance of the
+ Data class.
- Here are the rules for the shapes of the argument and return arrays:
+ Here are the rules for the shapes of the argument and return arrays:
x -- if the input data is single-dimensional, then x is rank-1
array; i.e. x = array([1, 2, 3, ...]); x.shape = (n,)
@@ -622,89 +631,89 @@ class ODR(object):
Parameters
----------
- data:
+ data : Data class instance
instance of the Data class
- model:
+ model : Model class instance
instance of the Model class
- beta0:
+ beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
- delta0: optional
+ delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
- ifixb: optional
+ ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
- ifixx: optional
+ ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
- job: optional
+ job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
- iprint: optional
+ iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
- errfile: optional
+ errfile : str, optional
string with the filename to print ODRPACK errors to. *Do Not Open
This File Yourself!*
- rptfile: optional
+ rptfile : str, optional
string with the filename to print ODRPACK summaries to. *Do Not
Open This File Yourself!*
- ndigit: optional
+ ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
- taufac: optional
+ taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
- sstol: optional
+ sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
- partol: optional
+ partol : float, optional
float specifying the tolerance for convergence based on the relative
change in the estimated parameters. The default value is eps**(2/3) for
explicit models and eps**(1/3) for implicit models. partol must be less
than 1.
- maxit: optional
+ maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
- stpb: optional
+ stpb : array_like, optional
sequence (len(stpb) == len(beta0)) of relative step sizes to compute
finite difference derivatives wrt the parameters.
- stpd: optional
+ stpd : optional
array (stpd.shape == data.x.shape or stpd.shape == (m,)) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
- sclb: optional
+ sclb : array_like, optional
sequence (len(sclb) == len(beta0)) of scaling factors for the
parameters. The purpose of these scaling factors are to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
- scld: optional
+ scld : array_like, optional
array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling
factors for the *errors* in the input variables. Again, these factors
are automatically computed if you do not provide them. If scld.shape ==
(m,), then the scaling factors are broadcast to all observations.
- work: optional
+ work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
- iwork: optional
+ iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
- output:
+ output : Output class instance
an instance of the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
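A minimal sketch tying Data, Model and ODR together for a straight-line fit (the function name and noise level are illustrative):

    import numpy as np
    from scipy.odr import Data, Model, ODR

    def linear(beta, x):
        # fcn(beta, x) --> y, operating on NumPy arrays
        return beta[0] * x + beta[1]

    x = np.linspace(0.0, 5.0, 20)
    y = 2.0 * x + 1.0 + 0.05 * np.random.randn(20)
    fit = ODR(Data(x, y), Model(linear), beta0=[1.0, 0.0])
    out = fit.run()
    print(out.beta)      # approximately [2., 1.]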
7 scipy/optimize/lbfgsb.py
@@ -44,15 +44,15 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
Parameters
----------
- func : callable f(x, *args)
+ func : callable f(x,*args)
Function to minimise.
x0 : ndarray
Initial guess.
- fprime : callable fprime(x, *args)
+ fprime : callable fprime(x,*args)
The gradient of `func`. If None, then `func` returns the function
value and the gradient (``f, g = func(x, *args)``), unless
`approx_grad` is True in which case `func` returns only ``f``.
- args : tuple
+ args : sequence
Arguments to pass to `func` and `fprime`.
approx_grad : bool
Whether to approximate the gradient numerically (in which case
@@ -98,6 +98,7 @@ def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
Information dictionary.
* d['warnflag'] is
+
- 0 if converged,
- 1 if too many function evaluations,
- 2 if stopped for another reason, given in d['task']
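A short sketch of a bounded minimization with a numerically approximated gradient (the objective and bounds are illustrative):

    from scipy.optimize import fmin_l_bfgs_b

    def f(x):
        return (x[0] - 1.0)**2 + (x[1] + 2.0)**2

    # approx_grad=True: func returns only f, the gradient is estimated.
    x, fval, d = fmin_l_bfgs_b(f, [0.0, 0.0], approx_grad=True,
                               bounds=[(0, None), (None, 0)])
    print(x, d['warnflag'])   # approximately [1., -2.], warnflag 0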
20 scipy/optimize/optimize.py
@@ -180,7 +180,7 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using the downhill simplex algorithm. This algorithm
- only uses function values, not dervatives or second dervatives.
+ only uses function values, not derivatives or second derivatives.
Parameters
----------
@@ -230,23 +230,23 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
- one or more variables.
+ one or more variables.
This algorithm has a long history of successful use in applications.
- But it will usually be slower than an algorithm that uses first or
- second derivative information. In practice it can have poor
- performance in high-dimensional problems and is not robust to
+ But it will usually be slower than an algorithm that uses first or
+ second derivative information. In practice it can have poor
+ performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is not
- a complete theory describing when the algorithm will successfully
- find the minimum or the speed of convergence.
+ a complete theory describing when the algorithm will successfully
+ find the minimum or the speed of convergence.
References
----------
Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
- Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
- Respectable", in Numerical Analysis 1995, Proceedings of the
- 1995 Dundee Biennial Conference in Numerical Analysis, D.F.
+ Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
+ Respectable", in Numerical Analysis 1995, Proceedings of the
+ 1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
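A minimal sketch of the derivative-free simplex search on the classic Rosenbrock test function:

    from scipy.optimize import fmin

    # Nelder-Mead needs only function values, no derivatives.
    rosen = lambda x: (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2
    xopt = fmin(rosen, [-1.2, 1.0], xtol=1e-6, ftol=1e-6, disp=False)
    print(xopt)    # close to [1., 1.]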
19 scipy/optimize/slsqp.py
@@ -17,17 +17,18 @@
_epsilon = sqrt(finfo(float).eps)
def approx_jacobian(x,func,epsilon,*args):
- """Approximate the Jacobian matrix of a callable function.
+ """
+ Approximate the Jacobian matrix of a callable function.
Parameters
----------
x : array_like
The state vector at which to compute the Jacobian matrix.
- func : callable f(x, *args)
+ func : callable f(x,*args)
The vector-valued function.
- epsilon : float\
- The peturbation used to determine the partial derivatives.
- *args : tuple
+ epsilon : float
+ The perturbation used to determine the partial derivatives.
+ args : sequence
Additional arguments passed to func.
Returns
@@ -70,7 +71,7 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
Initial guess for the independent variable(s).
eqcons : list
A list of functions of length n such that
- eqcons[j](x0,*args) == 0.0 in a successfully optimized
+ eqcons[j](x,*args) == 0.0 in a successfully optimized
problem.
f_eqcons : callable f(x,*args)
Returns a 1-D array in which each element must equal 0.0 in a
@@ -78,9 +79,9 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
eqcons is ignored.
ieqcons : list
A list of functions of length n such that
- ieqcons[j](x0,*args) >= 0.0 in a successfully optimized
+ ieqcons[j](x,*args) >= 0.0 in a successfully optimized
problem.
- f_ieqcons : callable f(x0,*args)
+ f_ieqcons : callable f(x,*args)
Returns a 1-D ndarray in which each element must be greater or
equal to 0.0 in a successfully optimized problem. If
f_ieqcons is specified, ieqcons is ignored.
@@ -122,7 +123,7 @@ def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
Returns
-------
- x : ndarray of float
+ out : ndarray of float
The final minimizer of func.
fx : ndarray of float, if full_output is true
The final value of the objective function.
13 scipy/optimize/zeros.py
@@ -43,10 +43,11 @@ def results_c(full_output, r):
# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50):
- """Find a zero using the Newton-Raphson or secant method.
+ """
+ Find a zero using the Newton-Raphson or secant method.
Find a zero of the function `func` given a nearby starting point `x0`.
- The Newton-Rapheson method is used if the derivative `fprime` of `func`
+ The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used.
Parameters
@@ -80,16 +81,16 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50):
Notes
-----
- The convergence rate of the Newton-Rapheson method is quadratic while
+ The convergence rate of the Newton-Raphson method is quadratic while
that of the secant method is somewhat less. This means that if the
function is well behaved the actual error in the estimated zero is
- approximatly the square of the requested tolerance up to roundoff
+ approximately the square of the requested tolerance up to roundoff
error. However, the stopping criterion used here is the step size and
- there is no quarantee that a zero has been found. Consequently the
+ there is no guarantee that a zero has been found. Consequently the
result should be verified. Safer algorithms are brentq, brenth, ridder,
and bisect, but they all require that the root first be bracketed in an
interval where the function changes sign. The brentq algorithm is
- recommended for general use in one dimemsional problems when such an
+ recommended for general use in one dimensional problems when such an
interval has been found.
"""
5 scipy/spatial/distance.py
@@ -480,13 +480,13 @@ def seuclidean(u, v, V):
def cityblock(u, v):
- r"""
+ """
Computes the Manhattan distance between two n-vectors u and v,
which is defined as
.. math::
- \sum_i {(u_i-v_i)}.
+ \\sum_i {\\left| u_i - v_i \\right|}.
Parameters
----------
@@ -499,6 +499,7 @@ def cityblock(u, v):
-------
d : double
The City Block distance between vectors ``u`` and ``v``.
+
"""
u = _validate_vector(u)
v = _validate_vector(v)
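A one-line sketch of the definition above:

    from scipy.spatial.distance import cityblock

    # |3 - 0| + |4 - 1| = 6
    print(cityblock([3, 4], [0, 1]))    # 6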
28 scipy/stats/distributions.py
@@ -1240,7 +1240,7 @@ def pdf(self,x,*args,**kwds):
Returns
-------
- pdf : array_like
+ pdf : ndarray
Probability density function evaluated at x
"""
@@ -1432,7 +1432,10 @@ def sf(self,x,*args,**kwds):
def logsf(self,x,*args,**kwds):
"""
- Log of the Survival function log(1-cdf) at x of the given RV.
+ Log of the survival function of the given RV.
+
+ Returns the log of the "survival function," defined as (1 - `cdf`),
+ evaluated at `x`.
Parameters
----------
@@ -1448,8 +1451,9 @@ def logsf(self,x,*args,**kwds):
Returns
-------
- logsf : array_like
- Log of the survival function evaluated at x
+ logsf : ndarray
+ Log of the survival function evaluated at `x`.
+
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
@@ -1688,16 +1692,13 @@ def moment(self, n, *args, **kwds):
Parameters
----------
n: int, n>=1
- order of moment
-
+ Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
- instance object for more information)
-
- loc : float, optional
- location parameter (default=0)
- scale : float, optional
- scale parameter (default=1)
+ instance object for more information).
+ kwds : keyword arguments, optional
+ These can include "loc" and "scale", as well as other keyword
+ arguments relevant for a given distribution.
"""
loc = kwds.get('loc', 0)
@@ -5825,7 +5826,6 @@ def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
-
Parameters
----------
k : array_like
@@ -5834,7 +5834,7 @@ def logpmf(self, k,*args, **kwds):
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
- location parameter (default=0)
+ Location parameter. Default is 0.
Returns
-------
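A short sketch touching both docstrings edited above (sf/logsf for a continuous RV, logpmf with loc for a discrete one):

    import numpy as np
    from scipy.stats import norm, poisson

    # logsf(x) == log(1 - cdf(x)); sf of the standard normal at 0 is 0.5.
    print(norm.sf(0.0), np.exp(norm.logsf(0.0)))    # 0.5 0.5
    # logpmf with the loc parameter documented above (shifts the support):
    print(poisson.logpmf(3, 2.0, loc=0))            # log of P(X=3) for mu=2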
9 scipy/stats/kde.py
@@ -54,18 +54,17 @@ class gaussian_kde(object):
kde(points) : array
same as kde.evaluate(points)
kde.integrate_gaussian(mean, cov) : float
- multiply pdf with a specified Gaussian and integrate over the whole domain
+ multiply pdf with a specified Gaussian and integrate over the whole
+ domain
kde.integrate_box_1d(low, high) : float
integrate pdf (1D only) between two bounds
kde.integrate_box(low_bounds, high_bounds) : float
- integrate pdf over a rectangular space between low_bounds and high_bounds
+ integrate pdf over a rectangular space between low_bounds and
+ high_bounds
kde.integrate_kde(other_kde) : float
integrate two kernel density estimates multiplied together
kde.resample(size=None) : array
randomly sample a dataset from the estimated pdf.
-
- Internal Methods
- ----------------
kde.covariance_factor() : float
computes the coefficient that multiplies the data covariance matrix to
obtain the kernel covariance matrix. Set this method to
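A brief sketch of the methods listed above on a 1-D sample (data is illustrative):

    import numpy as np
    from scipy.stats import gaussian_kde

    np.random.seed(0)
    kde = gaussian_kde(np.random.randn(200))
    print(kde.evaluate([0.0]))           # estimated pdf at 0, near 0.4
    print(kde.integrate_box_1d(-1, 1))   # probability mass in [-1, 1], ~0.68
    print(kde.resample(3).shape)         # (1, 3): resampled points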
10 scipy/stats/mstats_basic.py
@@ -1573,7 +1573,7 @@ def skewtest(a, axis=0):
Notes
-----
- The sample size should be at least 8.
+ The sample size must be at least 8.
"""
a, axis = _chk_asarray(a, axis)
@@ -1668,9 +1668,9 @@ def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
"""
Computes empirical quantiles for a data array.
- Samples quantile are defined by :math:`Q(p) = (1-g).x[i] +g.x[i+1]`,
- where :math:`x[j]` is the *j*th order statistic, and
- `i = (floor(n*p+m))`, `m=alpha+p*(1-alpha-beta)` and `g = n*p + m - i`.
+ Sample quantiles are defined by ``Q(p) = (1-g).x[i] + g.x[i+1]``,
+ where ``x[j]`` is the j-th order statistic, ``i = (floor(n*p+m))``,
+ ``m=alpha+p*(1-alpha-beta)`` and ``g = n*p + m - i``.
Typical values of (alpha,beta) are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
@@ -1707,7 +1707,7 @@ def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
Returns
-------
- quants : MaskedArray
+ mquantiles : MaskedArray
An array containing the calculated quantiles.
Examples
7 scipy/stats/rv.py
@@ -11,12 +11,15 @@
######################################
def randwppf(ppf, args=(), size=None):
- """returns an array of randomly distributed integers of a distribution
- whose percent point function (inverse of the CDF) is given.
+ """
+ Returns an array of random numbers from a distribution
+ whose percent point function (inverse of the CDF or quantile function)
+ is given.
args is a tuple of extra arguments to the ppf function (i.e. shape,
location, scale), and size is the size of the output. Note the ppf
function must accept an array of q values to compute over.
+
"""
U = random_sample(size=size)
return apply(ppf, (U,)+args)
16 scipy/stats/stats.py
@@ -2607,12 +2607,6 @@ def kendalltau(x, y, initial_lexsort=True):
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
- References
- ----------
- W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
- Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
- No. 314, Part 1, pp. 436-439, 1966.
-
Notes
-----
The definition of Kendall's tau that is used is::
@@ -2621,8 +2615,14 @@ def kendalltau(x, y, initial_lexsort=True):
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
- `y`. If a tie occurs for the same pair in both `x` and `y`, it is not added
- to either T or U.
+ `y`. If a tie occurs for the same pair in both `x` and `y`, it is not
+ added to either T or U.
+
+ References
+ ----------
+ W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
+ Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
+ No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
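A short sketch of a typical call, using the P/Q/T/U definition above:

    from scipy.stats import kendalltau

    x1 = [12, 2, 1, 12, 2]
    x2 = [1, 4, 7, 1, 0]
    tau, p_value = kendalltau(x1, x2)
    print(tau, p_value)    # tau < 0: the pairs are mostly discordant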