Skip to content

Commit

Permalink
Improve compatibility with 32bit architectures
Browse files Browse the repository at this point in the history
This replaces np.int64 with the platform-dependent np.int_ (and, in one dataframe test, the builtin float with np.float) so dtypes match the native word size on 32-bit architectures

See dask#20
  • Loading branch information
mrocklin committed Nov 30, 2017
1 parent 32ebaa5 commit 273ef51
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 16 deletions.
28 changes: 14 additions & 14 deletions dask/array/routines.py
Original file line number Diff line number Diff line change
Expand Up @@ -614,16 +614,16 @@ def _unique_internal(ar, indices, counts, return_inverse=False):

dt = [("values", u.dtype)]
if return_index:
dt.append(("indices", np.int64))
dt.append(("indices", np.int_))
if return_inverse:
dt.append(("inverse", np.int64))
dt.append(("inverse", np.int_))
if return_counts:
dt.append(("counts", np.int64))
dt.append(("counts", np.int_))

r = np.empty(u.shape, dtype=dt)
r["values"] = u
if return_inverse:
r["inverse"] = np.arange(len(r), dtype=np.int64)
r["inverse"] = np.arange(len(r), dtype=np.int_)
if return_index or return_counts:
for i, v in enumerate(r["values"]):
m = (ar == v)
Expand All @@ -646,18 +646,18 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False):
out_dtype = [("values", ar.dtype)]
if return_index:
args.extend([
arange(ar.shape[0], dtype=np.int64, chunks=ar.chunks[0]),
arange(ar.shape[0], dtype=np.int_, chunks=ar.chunks[0]),
"i"
])
out_dtype.append(("indices", np.int64))
out_dtype.append(("indices", np.int_))
else:
args.extend([None, None])
if return_counts:
args.extend([
ones((ar.shape[0],), dtype=np.int64, chunks=ar.chunks[0]),
ones((ar.shape[0],), dtype=np.int_, chunks=ar.chunks[0]),
"i"
])
out_dtype.append(("counts", np.int64))
out_dtype.append(("counts", np.int_))
else:
args.extend([None, None])

Expand Down Expand Up @@ -702,11 +702,11 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False):
}
out_dtype = [("values", ar.dtype)]
if return_index:
out_dtype.append(("indices", np.int64))
out_dtype.append(("indices", np.int_))
if return_inverse:
out_dtype.append(("inverse", np.int64))
out_dtype.append(("inverse", np.int_))
if return_counts:
out_dtype.append(("counts", np.int64))
out_dtype.append(("counts", np.int_))

out = Array(
sharedict.merge(*(
Expand All @@ -730,7 +730,7 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False):
# index in axis `1` (the one of unknown length). Reduce axis `1`
# through summing to get an array with known dimensionality and the
# mapping of the original values.
mtches = (ar[:, None] == out["values"][None, :]).astype(np.int64)
mtches = (ar[:, None] == out["values"][None, :]).astype(np.int_)
result.append((mtches * out["inverse"]).sum(axis=1))
if return_counts:
result.append(out["counts"])
Expand Down Expand Up @@ -978,7 +978,7 @@ def argwhere(a):

nz = isnonzero(a).flatten()

ind = indices(a.shape, dtype=np.int64, chunks=a.chunks)
ind = indices(a.shape, dtype=np.int_, chunks=a.chunks)
if ind.ndim > 1:
ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)
ind = compress(nz, ind, axis=0)
Expand Down Expand Up @@ -1008,7 +1008,7 @@ def where(condition, x=None, y=None):

@wraps(np.count_nonzero)
def count_nonzero(a, axis=None):
return isnonzero(asarray(a)).astype(np.int64).sum(axis=axis)
return isnonzero(asarray(a)).astype(np.int_).sum(axis=axis)


@wraps(np.flatnonzero)
Expand Down
2 changes: 1 addition & 1 deletion dask/array/tests/test_routines.py
Original file line number Diff line number Diff line change
Expand Up @@ -992,7 +992,7 @@ def test_count_nonzero_obj_axis(axis):
# #
# xref: https://github.com/numpy/numpy/issues/9468 #
#######################################################
assert_eq(x_c.astype(np.int64), d_c)
assert_eq(x_c.astype(np.int_), d_c)


def test_count_nonzero_str():
Expand Down
2 changes: 1 addition & 1 deletion dask/dataframe/tests/test_dataframe.py
Original file line number Diff line number Diff line change
Expand Up @@ -1149,7 +1149,7 @@ def test_repartition_on_pandas_dataframe():
@pytest.mark.parametrize('use_index', [True, False])
@pytest.mark.parametrize('n', [1, 2, 4, 5])
@pytest.mark.parametrize('k', [1, 2, 4, 5])
@pytest.mark.parametrize('dtype', [int, float, 'M8[ns]'])
@pytest.mark.parametrize('dtype', [int, np.float, 'M8[ns]'])
@pytest.mark.parametrize('transform', [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10,
Expand Down

0 comments on commit 273ef51

Please sign in to comment.