Skip to content

Commit

Permalink
TMP: some fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
bsipocz committed May 26, 2023
1 parent 011d5fa commit 50554c9
Show file tree
Hide file tree
Showing 10 changed files with 79 additions and 76 deletions.
10 changes: 4 additions & 6 deletions numpy/__init__.py
Expand Up @@ -29,8 +29,7 @@
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
>>> help(np.sort) # doctest: +IGNORE_OUTPUT
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
Expand All @@ -40,15 +39,14 @@
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
>>> np.lookfor('keyword') # doctest: +IGNORE_OUTPUT
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
>>> help(doc) # doctest: +IGNORE_OUTPUT
Available subpackages
---------------------
Expand Down
6 changes: 4 additions & 2 deletions numpy/core/_add_newdocs_scalars.py
Expand Up @@ -270,10 +270,12 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
void(b'\x00\x00\x00\x00\x00')
>>> np.void(b'abcd')
void(b'\x61\x62\x63\x64')
>>> # looks like a tuple, but is `np.void`
>>> np.void((5, 3.2, "eggs"), dtype="i,d,S5")
(5, 3.2, b'eggs') # looks like a tuple, but is `np.void`
(5, 3.2, b'eggs')
>>> # looks like a tuple, but is `np.void`
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
(3, 3) # looks like a tuple, but is `np.void`
(3, 3)
""")

Expand Down
3 changes: 2 additions & 1 deletion numpy/core/arrayprint.py
Expand Up @@ -61,6 +61,7 @@
# str/False on the way in/out.
'legacy': sys.maxsize}


def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
Expand Down Expand Up @@ -233,7 +234,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
>>> np.set_printoptions(precision=4)
>>> np.array([1.123456789])
[1.1235]
array([1.1235])
Long arrays can be summarised:
Expand Down
39 changes: 19 additions & 20 deletions numpy/core/fromnumeric.py
Expand Up @@ -9,9 +9,8 @@
from .._utils import set_module
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from .multiarray import asarray, asanyarray, concatenate
from . import _methods

_dt_ = nt.sctype2char
Expand Down Expand Up @@ -240,7 +239,7 @@ def reshape(a, newshape, order='C'):
-----
It is not always possible to change the shape of an array without copying
the data.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
Expand Down Expand Up @@ -995,14 +994,14 @@ def sort(a, axis=-1, kind=None, order=None):
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
>>> np.sort(a, order='height') # doctest: +IGNORE_OUTPUT
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
>>> np.sort(a, order=['age', 'height']) # doctest: +IGNORE_OUTPUT
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Expand Down Expand Up @@ -1540,8 +1539,8 @@ def squeeze(a, axis=None):
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x) # results in a 0d array
array(1234)
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
Expand Down Expand Up @@ -2498,7 +2497,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
(28293632, 28293632, array(True))
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
Expand Down Expand Up @@ -2741,7 +2740,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
max : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is an int, the result is an array of dimension
``a.ndim - 1``. If `axis` is a tuple, the result is an array of
``a.ndim - 1``. If `axis` is a tuple, the result is an array of
dimension ``a.ndim - len(axis)``.
See Also
Expand Down Expand Up @@ -2884,7 +2883,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
min : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is an int, the result is an array of dimension
``a.ndim - 1``. If `axis` is a tuple, the result is an array of
``a.ndim - 1``. If `axis` is a tuple, the result is an array of
dimension ``a.ndim - len(axis)``.
See Also
Expand Down Expand Up @@ -3045,8 +3044,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
>>> np.prod(x) # doctest: +SKIP
16
The product of an empty array is the neutral element 1:
Expand All @@ -3072,7 +3071,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
array([ 2., 12.])
>>> np.prod(a, axis=0)
array([3., 8.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
Expand Down Expand Up @@ -3477,7 +3476,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
0.55000000074505806
Specifying a where argument:
Expand Down Expand Up @@ -3601,7 +3600,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
1.1180339887498949
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
Expand All @@ -3618,13 +3617,13 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
0.44999999925494177
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
2.614064523559687
>>> np.std(a, where=[[True], [True], [False]])
2.0
Expand Down Expand Up @@ -3756,15 +3755,15 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
6.833333333333333
>>> np.var(a, where=[[True], [True], [False]])
4.0
Expand All @@ -3788,7 +3787,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
**kwargs)


# Aliases of other functions. Provided unique docstrings
# Aliases of other functions. Provided unique docstrings
# are for reference purposes only. Wherever possible,
# avoid using them.

Expand Down
2 changes: 2 additions & 0 deletions numpy/lib/_datasource.py
Expand Up @@ -42,6 +42,8 @@

_open = open

__doctest_skip__ = ['DataSource']


def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
Expand Down
10 changes: 5 additions & 5 deletions numpy/lib/arraysetops.py
Expand Up @@ -265,13 +265,13 @@ def unique(ar, return_index=False, return_inverse=False,
array([1, 2, 3, 4, 6])
>>> counts
array([1, 3, 1, 1, 1])
>>> np.repeat(values, counts)
array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
>>> np.repeat(values, counts) # original order not preserved
array([1, 2, 2, 2, 3, 4, 6])
"""
ar = np.asanyarray(ar)
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts,
ret = _unique1d(ar, return_index, return_inverse, return_counts,
equal_nan=equal_nan)
return _unpack_tuple(ret)

Expand Down Expand Up @@ -673,11 +673,11 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
# However, here we set the requirement that by default
# the intermediate array can only be 6x
# the combined memory allocation of the original
# arrays. See discussion on
# arrays. See discussion on
# https://github.com/numpy/numpy/pull/12065.

if (
range_safe_from_overflow and
range_safe_from_overflow and
(below_memory_constraint or kind == 'table')
):

Expand Down

0 comments on commit 50554c9

Please sign in to comment.