Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ Enhancements
- In the theoretical VPF calculation (``theory.vpf``), the total volume of the random spheres can now exceed the volume of the sample [#238]
- Gridlink (the binning of particles into cells) now uses a parallel algorithm for the theory module [#239]
- Add detection of known-bad Cray hugepages library at NERSC [#246]
- Replace ``np.float`` with ``np.float64`` to fix numpy 1.20 deprecation [#250]

Bug fixes
---------
Expand Down
16 changes: 8 additions & 8 deletions Corrfunc/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def read_fastfood_catalog(filename, return_dtype=None, need_header=None):

return_dtype: numpy dtype for returned arrays. Default ``numpy.float64``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
{np.float64, np.float32}

need_header: boolean, default None.
Returns the header found in the fast-food file in addition to the
Expand Down Expand Up @@ -81,9 +81,9 @@ def read_fastfood_catalog(filename, return_dtype=None, need_header=None):

"""
if return_dtype is None:
return_dtype = np.float
return_dtype = np.float64

if return_dtype not in [np.float32, np.float]:
if return_dtype not in [np.float32, np.float64]:
msg = "Return data-type must be set and a valid numpy float"
raise ValueError(msg)

Expand Down Expand Up @@ -142,7 +142,7 @@ def read_fastfood_catalog(filename, return_dtype=None, need_header=None):
assert skip1 == ngal * 4 or skip1 == ngal * 8, \
"fast-food file seems to be corrupt (padding bytes a)"
# the next division must be the integer division
input_dtype = np.float32 if skip1 // ngal == 4 else np.float
input_dtype = np.float32 if skip1 // ngal == 4 else np.float64
array = np.fromfile(f, input_dtype, ngal)
skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
if return_dtype == input_dtype:
Expand Down Expand Up @@ -171,7 +171,7 @@ def read_ascii_catalog(filename, return_dtype=None):

return_dtype: numpy dtype for returned arrays. Default ``numpy.float64``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
{np.float64, np.float32}

Returns
--------
Expand Down Expand Up @@ -216,7 +216,7 @@ def read_ascii_catalog(filename, return_dtype=None):
"""

if return_dtype is None:
return_dtype = np.float
return_dtype = np.float64

if not file_exists(filename):
msg = "Could not find file = {0}".format(filename)
Expand All @@ -241,7 +241,7 @@ def read_ascii_catalog(filename, return_dtype=None):
return x, y, z


def read_catalog(filebase=None, return_dtype=np.float):
def read_catalog(filebase=None, return_dtype=np.float64):
"""
Reads a galaxy/randoms catalog and returns 3 XYZ arrays.

Expand All @@ -254,7 +254,7 @@ def read_catalog(filebase=None, return_dtype=np.float):

return_dtype: numpy dtype for returned arrays. Default ``numpy.float64``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
{np.float64, np.float32}

Returns
--------
Expand Down
10 changes: 5 additions & 5 deletions Corrfunc/mocks/DDrppi_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,12 +387,12 @@ def DDrppi_mocks(autocorr, cosmology, nthreads, pimax, binfile,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float),
(bytes_to_native_str(b'rmax'), np.float),
(bytes_to_native_str(b'rpavg'), np.float),
(bytes_to_native_str(b'pimax'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'rpavg'), np.float64),
(bytes_to_native_str(b'pimax'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)

if not c_api_timer:
Expand Down
10 changes: 5 additions & 5 deletions Corrfunc/mocks/DDsmu_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,12 +322,12 @@ def DDsmu_mocks(autocorr, cosmology, nthreads, mu_max, nmu_bins, binfile,
import os
os.remove(sbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float),
(bytes_to_native_str(b'smax'), np.float),
(bytes_to_native_str(b'savg'), np.float),
(bytes_to_native_str(b'mumax'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float64),
(bytes_to_native_str(b'smax'), np.float64),
(bytes_to_native_str(b'savg'), np.float64),
(bytes_to_native_str(b'mumax'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])

nbin = len(extn_results)
results = np.zeros(nbin, dtype=results_dtype)
Expand Down
12 changes: 6 additions & 6 deletions Corrfunc/mocks/DDtheta_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -338,11 +338,11 @@ def DDtheta_mocks(autocorr, nthreads, binfile,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'thetamin'), np.float),
(bytes_to_native_str(b'thetamax'), np.float),
(bytes_to_native_str(b'thetaavg'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'thetamin'), np.float64),
(bytes_to_native_str(b'thetamax'), np.float64),
(bytes_to_native_str(b'thetaavg'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)

if not c_api_timer:
Expand Down Expand Up @@ -556,8 +556,8 @@ def find_fastest_DDtheta_mocks_bin_refs(autocorr, nthreads, binfile,

dtype = np.dtype([(bytes_to_native_str(b'nRA'), np.int64),
(bytes_to_native_str(b'nDEC'), np.int64),
(bytes_to_native_str(b'avg_time'), np.float),
(bytes_to_native_str(b'sigma_time'), np.float)])
(bytes_to_native_str(b'avg_time'), np.float64),
(bytes_to_native_str(b'sigma_time'), np.float64)])
all_runtimes = np.zeros(nperms, dtype=dtype)
all_runtimes[:] = np.inf

Expand Down
4 changes: 2 additions & 2 deletions Corrfunc/mocks/vpf_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,9 +311,9 @@ def vpf_mocks(rmax, nbins, nspheres, numpN,
else:
extn_results, api_time = extn_results

results_dtype = np.dtype([(bytes_to_native_str(b'rmax'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'pN'),
(np.float, numpN))])
(np.float64, numpN))])
nbin = len(extn_results)
results = np.zeros(nbin, dtype=results_dtype)

Expand Down
8 changes: 4 additions & 4 deletions Corrfunc/theory/DD.py
Original file line number Diff line number Diff line change
Expand Up @@ -254,11 +254,11 @@ def DD(autocorr, nthreads, binfile, X1, Y1, Z1, weights1=None, periodic=True,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float),
(bytes_to_native_str(b'rmax'), np.float),
(bytes_to_native_str(b'ravg'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'ravg'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)
if not c_api_timer:
return results
Expand Down
11 changes: 6 additions & 5 deletions Corrfunc/theory/DDrppi.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,12 +305,13 @@ def DDrppi(autocorr, nthreads, pimax, binfile, X1, Y1, Z1, weights1=None,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float),
(bytes_to_native_str(b'rmax'), np.float),
(bytes_to_native_str(b'rpavg'), np.float),
(bytes_to_native_str(b'pimax'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'rpavg'), np.float64),
(bytes_to_native_str(b'pimax'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float),])
(bytes_to_native_str(b'weightavg'), np.float64),
])
results = np.array(extn_results, dtype=results_dtype)

if not c_api_timer:
Expand Down
11 changes: 6 additions & 5 deletions Corrfunc/theory/DDsmu.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,12 +322,13 @@ def DDsmu(autocorr, nthreads, binfile, mu_max, nmu_bins,
import os
os.remove(sbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float),
(bytes_to_native_str(b'smax'), np.float),
(bytes_to_native_str(b'savg'), np.float),
(bytes_to_native_str(b'mu_max'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float64),
(bytes_to_native_str(b'smax'), np.float64),
(bytes_to_native_str(b'savg'), np.float64),
(bytes_to_native_str(b'mu_max'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float),])
(bytes_to_native_str(b'weightavg'), np.float64),
])
results = np.array(extn_results, dtype=results_dtype)

if not c_api_timer:
Expand Down
4 changes: 2 additions & 2 deletions Corrfunc/theory/vpf.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,9 +223,9 @@ def vpf(rmax, nbins, nspheres, numpN, seed,
else:
extn_results, api_time = extn_results

results_dtype = np.dtype([(bytes_to_native_str(b'rmax'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'pN'),
(np.float, numpN))])
(np.float64, numpN))])
nbin = len(extn_results)
results = np.zeros(nbin, dtype=results_dtype)

Expand Down
14 changes: 7 additions & 7 deletions Corrfunc/theory/wp.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,8 +174,8 @@ def find_fastest_wp_bin_refs(boxsize, pimax, nthreads, binfile, X, Y, Z,
dtype = np.dtype([(bytes_to_native_str(b'nx'), np.int64),
(bytes_to_native_str(b'ny'), np.int64),
(bytes_to_native_str(b'nz'), np.int64),
(bytes_to_native_str(b'avg_time'), np.float),
(bytes_to_native_str(b'sigma_time'), np.float)])
(bytes_to_native_str(b'avg_time'), np.float64),
(bytes_to_native_str(b'sigma_time'), np.float64)])
all_runtimes = np.zeros(maxbinref**3, dtype=dtype)
all_runtimes[:] = np.inf

Expand Down Expand Up @@ -525,12 +525,12 @@ def wp(boxsize, pimax, nthreads, binfile, X, Y, Z,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float),
(bytes_to_native_str(b'rmax'), np.float),
(bytes_to_native_str(b'rpavg'), np.float),
(bytes_to_native_str(b'wp'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'rpavg'), np.float64),
(bytes_to_native_str(b'wp'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)

# A better solution for returning multiple values based on
Expand Down
10 changes: 5 additions & 5 deletions Corrfunc/theory/xi.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,12 +234,12 @@ def xi(boxsize, nthreads, binfile, X, Y, Z,
import os
os.remove(rbinfile)

results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float),
(bytes_to_native_str(b'rmax'), np.float),
(bytes_to_native_str(b'ravg'), np.float),
(bytes_to_native_str(b'xi'), np.float),
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'ravg'), np.float64),
(bytes_to_native_str(b'xi'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float)])
(bytes_to_native_str(b'weightavg'), np.float64)])
results = np.array(extn_results, dtype=results_dtype)

if not c_api_timer:
Expand Down
8 changes: 4 additions & 4 deletions Corrfunc/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,8 +141,8 @@ def convert_3d_counts_to_cf(ND1, ND2, NR1, NR2,

nonzero = pair_counts['R1R2'] > 0
if 'LS' in estimator or 'Landy' in estimator:
fN1 = np.float(NR1) / np.float(ND1)
fN2 = np.float(NR2) / np.float(ND2)
fN1 = np.float64(NR1) / np.float64(ND1)
fN2 = np.float64(NR2) / np.float64(ND2)
cf = np.zeros(nbins)
cf[:] = np.nan
cf[nonzero] = (fN1 * fN2 * pair_counts['D1D2'][nonzero] -
Expand Down Expand Up @@ -795,8 +795,8 @@ def gridlink_sphere(thetamax,
dec_binsize = dec_diff/ngrid_dec

# Upper and lower limits of the declination bands
grid_dtype= np.dtype({'names':['dec_limit','ra_limit'],
'formats':[(np.float, (2, )), (np.float, (2, ))]
grid_dtype = np.dtype({'names': ['dec_limit', 'ra_limit'],
'formats': [(np.float64, (2, )), (np.float64, (2, ))]
})
if not link_in_ra:
sphere_grid = np.zeros(ngrid_dec, dtype=grid_dtype)
Expand Down
16 changes: 6 additions & 10 deletions docs/source/modules/read_catalog.rst
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ then you can just pass them straight to ``Corrfunc``.
If you need to read the arrays in from disk, then read
on. For the command-line interface, the input files can only
be in ASCII or fast-food format (for description of fast-food
binaries, see :ref:`fast_food_binary`).
binaries, see :ref:`fast_food_binary`).

.. toctree::
:maxdepth: 1
Expand All @@ -24,7 +24,7 @@ Reading from ASCII files
========================

This is the most straight forward way -- you need an ASCII
file with columns X/Y/Z (white-space separated).
file with columns X/Y/Z (white-space separated).

Using ``numpy.genfromtxt``
---------------------------
Expand All @@ -35,12 +35,12 @@ Using ``numpy.genfromtxt``
fname = "myfile_containing_xyz_columns.dat"

# For double precision calculations
dtype = np.float ## change to np.float32 for single precision
dtype = np.float64 ## change to np.float32 for single precision

X, Y, Z = np.genfromtxt(fname, dtype=dtype, unpack=True)


.. note:: :py:mod:`Corrfunc.read_catalog` uses this exact code-snippet to read in ASCII files in python.
.. note:: :py:mod:`Corrfunc.read_catalog` uses this exact code-snippet to read in ASCII files in python.


Reading from fast-food files
Expand All @@ -50,14 +50,14 @@ If you are using the command-line interface, then the code will **have** to
read the arrays from files. While ``Corrfunc`` natively supports both
ASCII and fast-food formats (for description of fast-food binaries, see
:ref:`fast_food_binary`), the following python utility is intended to
read both these types of files.
read both these types of files.


Using utility: :py:mod:`Corrfunc.io.read_catalog`
-------------------------------------------------

:py:mod:`Corrfunc.io.read_catalog` can directly read ASCII files or fast-food binary
files.
files.

.. code:: python

Expand All @@ -71,7 +71,3 @@ files.
# filename
fname = "myfile_containing_xyz_columns.dat"
X, Y, Z = read_catalog(fname)




2 changes: 1 addition & 1 deletion mocks/python_bindings/call_correlation_functions_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def main():
# Double-precision calculations
# (if you want single-prec, just change the following line
# to dtype = np.float32)
dtype = np.float
dtype = np.float64

# Check if pandas is available - much faster to read in the
# data through pandas
Expand Down
4 changes: 2 additions & 2 deletions theory/python_bindings/call_correlation_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ def read_fastfood_catalog(filename, return_dtype=None, need_header=None):
assert skip1 == ngal * 4 or skip1 == ngal * 8, \
"fast-food file seems to be corrupt (padding bytes a)"
# the next division must be the integer division
input_dtype = np.float32 if skip1 // ngal == 4 else np.float
input_dtype = np.float32 if skip1 // ngal == 4 else np.float64
array = np.fromfile(f, input_dtype, ngal)
skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
pos[field] = array if return_dtype == input_dtype \
Expand Down Expand Up @@ -181,7 +181,7 @@ def read_fastfood_catalog(filename, return_dtype=None, need_header=None):
f = read_fastfood_catalog if '.ff' in extension else read_ascii_catalog

# default return is double
x, y, z = f(filebase, np.float)
x, y, z = f(filebase, np.float64)
return x, y, z

raise IOError("Could not locate file {0}", filebase)
Expand Down