DOC: fix more doc wiki edits.

commit 0f179f03a47e46879e91406bb031cb3d3c359428 (parent 55a9259)
authored by @rgommers
scipy/cluster/hierarchy.py (15 lines changed)
@@ -232,9 +232,7 @@ def _randdm(pnts):
def single(y):
"""
- Performs single/min/nearest linkage on the condensed distance
- matrix ``y``. See ``linkage`` for more information on the return
- structure and algorithm.
+ Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
@@ -257,9 +255,7 @@ def single(y):
def complete(y):
"""
- Performs complete complete/max/farthest point linkage on the
- condensed distance matrix ``y``. See ``linkage`` for more
- information on the return structure and algorithm.
+ Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
@@ -274,6 +270,10 @@ def complete(y):
the ``linkage`` function documentation for more information
on its structure.
+ See Also
+ --------
+ linkage
+
"""
return linkage(y, method='complete', metric='euclidean')
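
For context, a minimal usage sketch of the two wrapper functions documented above (illustrative only, not part of this diff; assumes NumPy and SciPy are importable as shown):

    import numpy as np
    from scipy.spatial.distance import pdist
    from scipy.cluster.hierarchy import single, complete

    pts = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
    y = pdist(pts)            # condensed distance matrix (1-D)
    Z_single = single(y)      # equivalent to linkage(y, method='single')
    Z_complete = complete(y)  # equivalent to linkage(y, method='complete')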
@@ -1978,7 +1978,6 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
-
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
@@ -1992,7 +1991,6 @@ def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
-
show_leaf_counts : bool, optional
When ``True``, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
@@ -2036,7 +2034,6 @@ def llf(id):
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
-
show_contracted : bool
When ``True`` the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
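
A short sketch of how the dendrogram options mentioned in this hunk fit together (illustrative only; assumes matplotlib is installed):

    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.cluster.hierarchy import linkage, dendrogram

    X = np.random.rand(12, 2)
    Z = linkage(X, method='complete')
    # Rotate leaf labels, truncate to the last 5 merged clusters, and mark
    # contracted nodes with crosses as described for show_contracted.
    dendrogram(Z, leaf_rotation=90, show_leaf_counts=True,
               truncate_mode='lastp', p=5, show_contracted=True)
    plt.show()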
scipy/fftpack/basic.py (34 lines changed)
@@ -344,12 +344,38 @@ def rfft(x, n=None, axis=-1, overwrite_x=0):
def irfft(x, n=None, axis=-1, overwrite_x=0):
- """ irfft(x, n=None, axis=-1, overwrite_x=0) -> y
-
+ """
Return inverse discrete Fourier transform of real sequence x.
+
The contents of x is interpreted as the output of the ``rfft(..)``
function.
+ Parameters
+ ----------
+ x : array_like
+ Transformed data to invert.
+ n : int, optional
+ Length of the inverse Fourier transform.
+ If n < x.shape[axis], x is truncated.
+ If n > x.shape[axis], x is zero-padded.
+ The default results in n = x.shape[axis].
+ axis : int, optional
+ Axis along which the ifft's are computed; the default is over
+ the last axis (i.e., axis=-1).
+ overwrite_x : bool, optional
+ If True the contents of `x` can be destroyed; the default is False.
+
+ Returns
+ -------
+ irfft : ndarray of floats
+ The inverse discrete Fourier transform.
+
+ See Also
+ --------
+ rfft, ifft
+
+ Notes
+ -----
The returned real array contains::
[y(0),y(1),...,y(n-1)]
@@ -370,10 +396,6 @@ def irfft(x, n=None, axis=-1, overwrite_x=0):
For details on input parameters, see `rfft`.
- See Also
- --------
- rfft, ifft
-
"""
tmp = _asfarray(x)
if not numpy.isrealobj(tmp):
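
For context, a round-trip sketch of rfft/irfft as documented above (illustrative only):

    import numpy as np
    from scipy.fftpack import rfft, irfft

    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    y = rfft(x)                    # packed real-FFT coefficients
    x_back = irfft(y)              # inverse transform of the packed output
    print(np.allclose(x, x_back))  # True, up to floating-point error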
scipy/optimize/zeros.py (7 lines changed)
@@ -84,7 +84,7 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
See Also
--------
- brentq, brenth, ridder, bisect : find zeroes in one dimension.
+ brentq, brenth, ridder, bisect
fsolve : find zeroes in n dimensions.
Notes
@@ -162,7 +162,8 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
def bisect(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
- """Find root of f in [a,b].
+ """
+ Find root of f in [a,b].
Basic bisection routine to find a zero of the function f between the
arguments a and b. f(a) and f(b) can not have the same signs. Slow but
@@ -204,7 +205,7 @@ def bisect(f, a, b, args=(),
See Also
--------
- brentq, brenth, bisect, newton : one-dimensional root-finding
+ brentq, brenth, bisect, newton
fixed_point : scalar fixed-point finder
fsolve : n-dimensional root-finding
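
A minimal sketch of the two routines whose docstrings are touched here (illustrative only):

    from scipy.optimize import bisect, newton

    f = lambda x: x**2 - 2.0
    root_b = bisect(f, 0.0, 2.0)   # f(0) and f(2) have opposite signs
    root_n = newton(f, 1.0)        # Newton/secant iteration from an initial guess
    print(root_b, root_n)          # both close to sqrt(2) ~= 1.41421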
scipy/spatial/distance.py (35 lines changed)
@@ -1651,10 +1651,10 @@ def num_obs_y(Y):
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
- r"""
- Computes distance between each pair of observation vectors in the
- Cartesian product of two collections of vectors. ``XA`` is a
- :math:`m_A` by :math:`n` array while ``XB`` is a :math:`m_B` by
+ """
+ Computes distance between each pair of the two collections of inputs.
+
+ ``XA`` is a :math:`m_A` by :math:`n` array while ``XB`` is a :math:`m_B` by
:math:`n` array. A :math:`m_A` by :math:`m_B` array is
returned. An exception is thrown if ``XA`` and ``XB`` do not have
the same number of columns.
@@ -1675,7 +1675,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
2. ``Y = cdist(XA, XB, 'minkowski', p)``
Computes the distances using the Minkowski distance
- :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \geq 1`.
+ :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
@@ -1689,7 +1689,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- \sqrt{\sum {(u_i-v_i)^2 / V[x_i]}}.
+ \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
@@ -1706,11 +1706,11 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- 1 - \frac{u \cdot v}
+ 1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
- :math:`u \cdot v` is the dot product of :math:`u` and :math:`v`.
+ :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
@@ -1718,11 +1718,11 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- 1 - \frac{(u - \bar{u}) \cdot (v - \bar{v})}
- {{||(u - \bar{u})||}_2 {||(v - \bar{v})||}_2}
+ 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
+ {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
- where :math:`\bar{v}` is the mean of the elements of vector v,
- and :math:`x \cdot y` is the dot product of :math:`x` and :math:`y`.
+ where :math:`\\bar{v}` is the mean of the elements of vector v,
+ and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
@@ -1748,7 +1748,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- d(u,v) = \max_i {|u_i-v_i|}.
+ d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
@@ -1757,7 +1757,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- d(u,v) = \sum_i \frac{|u_i-v_i|}
+ d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
@@ -1768,8 +1768,8 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
.. math::
- d(u,v) = \frac{\sum_i (u_i-v_i)}
- {\sum_i (u_i+v_i)}
+ d(u,v) = \\frac{\\sum_i (u_i-v_i)}
+ {\\sum_i (u_i+v_i)}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
@@ -1841,7 +1841,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
- sokalsneath being called :math:`{n \choose 2}` times, which
+ sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
@@ -1877,6 +1877,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix.
+
"""
# 21. Y = cdist(XA, XB, 'test_Y')
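
For context, a small sketch of cdist with two of the metrics listed above (illustrative only):

    import numpy as np
    from scipy.spatial.distance import cdist

    XA = np.array([[0.0, 0.0], [1.0, 1.0]])               # m_A = 2 observations
    XB = np.array([[0.0, 1.0], [1.0, 0.0], [2.0, 2.0]])   # m_B = 3 observations
    D_euclidean = cdist(XA, XB)                    # 2 x 3 distance matrix
    D_cityblock = cdist(XA, XB, 'minkowski', p=1)  # p-norm with p = 1
    print(D_euclidean.shape)                       # (2, 3)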
scipy/stats/morestats.py (26 lines changed)
@@ -1323,17 +1323,16 @@ def thefunc(x):
def circmean(samples, high=2*pi, low=0, axis=None):
"""
- Compute the circular mean for samples assumed to be in the range
- [low to high].
+ Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
+ high : float or int, optional
+ High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
- high : float or int, optional
- High boundary for circular mean range. Default is 2*pi.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
@@ -1355,8 +1354,7 @@ def circmean(samples, high=2*pi, low=0, axis=None):
def circvar(samples, high=2*pi, low=0, axis=None):
"""
- Compute the circular variance for samples assumed to be in the range
- [low to high].
+ Compute the circular variance for samples assumed to be in a range
Parameters
----------
@@ -1365,11 +1363,10 @@ def circvar(samples, high=2*pi, low=0, axis=None):
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
- High boundary for circular variance range. Default is 2*pi.
+ High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
-
Returns
-------
@@ -1377,9 +1374,10 @@ def circvar(samples, high=2*pi, low=0, axis=None):
Circular variance.
Notes
- ------
- This uses a definition of circular variance that in the limit of small angles
- returns a number close to the 'linear' variance.
+ -----
+ This uses a definition of circular variance that in the limit of small
+ angles returns a number close to the 'linear' variance.
+
"""
ang = (samples - low)*2*pi / (high-low)
res = np.mean(exp(1j*ang), axis=axis)
@@ -1398,7 +1396,8 @@ def circstd(samples, high=2*pi, low=0, axis=None):
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
- High boundary for circular standard deviation range. Default is 2*pi.
+ High boundary for circular standard deviation range.
+ Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
@@ -1409,9 +1408,10 @@ def circstd(samples, high=2*pi, low=0, axis=None):
Circular standard deviation.
Notes
- ------
+ -----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
+
"""
ang = (samples - low)*2*pi / (high-low)
res = np.mean(exp(1j*ang), axis=axis)
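
A short sketch of the three circular statistics documented above, using degrees (illustrative only; the exact values returned depend on the definitions noted in the docstrings):

    import numpy as np
    from scipy.stats import circmean, circvar, circstd

    angles = np.array([355.0, 5.0, 2.0, 359.0, 10.0])  # clustered near 0/360
    print(circmean(angles, high=360, low=0))  # ~2 deg, not the naive mean of ~146
    print(circvar(angles, high=360, low=0))
    print(circstd(angles, high=360, low=0))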
scipy/stats/stats.py (59 lines changed)
@@ -224,7 +224,30 @@
def find_repeats(arr):
- """Find repeats in arr and return (repeats, repeat_count)
+ """
+ Find repeats and repeat counts.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array
+
+ Returns
+ -------
+ find_repeats : tuple
+ Returns a tuple of two 1-D ndarrays. The first ndarray are the repeats
+ as sorted, unique values that are repeated in `arr`. The second
+ ndarray are the counts mapped one-to-one of the repeated values
+ in the first ndarray.
+
+ Examples
+ --------
+ >>> sp.stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
+ (array([ 2. ]), array([ 4 ], dtype=int32))
+
+ >>> sp.stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
+ (array([ 4., 5.]), array([2, 2], dtype=int32))
+
"""
v1,v2, n = futil.dfreps(arr)
return v1[:n],v2[:n]
@@ -1695,13 +1718,19 @@ def obrientransform(*args):
Computes a transform on input data (any number of columns).
Used to test for homogeneity of variance prior to running one-way stats.
- Each array in *args is one level of a factor. If an F_oneway() run on the
- transformed data and found significant, variances are unequal. From
- Maxwell and Delaney, p.112.
+ Each array in ``*args`` is one level of a factor.
+ If an `F_oneway` run on the transformed data and found significant,
+ variances are unequal. From Maxwell and Delaney, p.112.
+
+ Parameters
+ ----------
+ args : ndarray
+ Any number of arrays.
Returns
-------
- Transformed data for use in an ANOVA
+ obrientransform : ndarray
+ Transformed data for use in an ANOVA.
"""
TINY = 1e-10
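
For context, a minimal sketch of the workflow the obrientransform docstring describes: transform the samples, then feed the result to f_oneway (illustrative only; unpacking the result this way assumes equal-length samples):

    from scipy.stats import obrientransform, f_oneway

    x = [10, 11, 13, 9, 7, 12, 12, 8, 10]
    y = [14, 9, 15, 6, 5, 16, 13, 10, 12]
    tx, ty = obrientransform(x, y)   # one transformed array per input sample
    F, p = f_oneway(tx, ty)          # a small p suggests unequal variances
    print(F, p)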
@@ -3466,15 +3495,27 @@ def mannwhitneyu(x, y, use_continuity=True):
def tiecorrect(rankvals):
- """Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
- See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
- Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
- code.
+ """
+ Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
+
+ Parameters
+ ----------
+ rankvals : array_like
+ Input values
Returns
-------
T correction factor for U or H
+ Notes
+ -----
+ Code adapted from \\|STAT rankind.c code.
+
+ References
+ ----------
+ Siegel, S. (1956) Nonparametric Statistics for the Behavioral
+ Sciences. New York: McGraw-Hill.
+
"""
sorted,posn = fastsort(asarray(rankvals))
n = len(sorted)
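
A minimal sketch of tiecorrect applied to midranks produced by rankdata (illustrative only):

    from scipy.stats import rankdata, tiecorrect

    ranks = rankdata([1, 2, 2, 2, 3, 3, 4])  # midranks; several tied values
    print(tiecorrect(ranks))                 # 1.0 means no ties; < 1.0 otherwise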