Commit fc54ef9

More ruff linting

lgarrison committed Mar 10, 2023
1 parent a651781 commit fc54ef9
Showing 20 changed files with 142 additions and 119 deletions.
4 changes: 4 additions & 0 deletions abacusnbody/data/__init__.py
@@ -0,0 +1,4 @@
+# Stop astropy from trying to download time data; nodes on some clusters are not allowed to access the internet directly
+from astropy.utils import iers
+
+iers.conf.auto_download = False
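For background, a brief hedged sketch (not part of this commit) of what this setting does: with auto-download disabled, astropy falls back to its bundled IERS tables rather than fetching updates over the network, so time-scale conversions still work on offline compute nodes, possibly with warnings or reduced precision for very recent dates.

# Hedged sketch, not from this repo: a time conversion with auto-download off.
from astropy.utils import iers
iers.conf.auto_download = False

from astropy.time import Time
t = Time('2015-06-30T12:00:00')
print(t.ut1.iso)  # falls back to the bundled IERS tables; no network access attempted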
2 changes: 1 addition & 1 deletion abacusnbody/data/asdf.py
@@ -65,7 +65,7 @@ def compress(self, data, **kwargs):
             shuffle = blosc.SHUFFLE
         elif shuffle == 'bitshuffle':
             shuffle = blosc.BITSHUFFLE
-        elif shuffle == None:
+        elif shuffle is None:
             shuffle = blosc.NOSHUFFLE
         else:
             raise ValueError(shuffle)
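The `== None` → `is None` fix above is pycodestyle/ruff rule E711. A minimal illustration (not from this repository) of why identity comparison is the safe spelling:

# Hedged example: `__eq__` can be overridden, so `== None` can lie;
# `is None` always tests object identity.
import numpy as np

class Always:
    def __eq__(self, other):
        return True  # claims equality with everything, even None

obj = Always()
print(obj == None)  # True -- misleading
print(obj is None)  # False -- correct

# With NumPy arrays the difference is starker: `==` broadcasts elementwise.
arr = np.zeros(3)
print(arr == None)  # array([False, False, False]), not a single bool
print(arr is None)  # False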
97 changes: 60 additions & 37 deletions abacusnbody/data/compaso_halo_catalog.py
@@ -15,34 +15,29 @@
 import warnings
 from collections import defaultdict
 from glob import glob
-from os.path import abspath, basename, dirname, isdir, isfile
+from os.path import abspath, basename, dirname, isdir, isfile, samefile
 from os.path import join as pjoin
-from os.path import normpath, samefile
 from pathlib import PurePath

-# Stop astropy from trying to download time data; nodes on some clusters are not allowed to access the internet directly
-from astropy.utils import iers
-
-iers.conf.auto_download = False
-
 import asdf
 import asdf.compression
 import astropy.table
 import numba as nb
 import numpy as np
 from astropy.table import Table

+from . import asdf as _asdf
+from . import bitpacked
+
 try:
     asdf.compression.validate('blsc')
 except Exception as e:
     raise Exception("Abacus ASDF extension not properly loaded! Try reinstalling abacusutils, or updating ASDF: `pip install asdf>=2.8`") from e

-from . import bitpacked
-
 # Default to 4 decompression threads, or fewer if fewer cores are available
 DEFAULT_BLOSC_THREADS = 4
 DEFAULT_BLOSC_THREADS = max(1, min(len(os.sched_getaffinity(0)), DEFAULT_BLOSC_THREADS))
-from . import asdf as _asdf

 _asdf.set_nthreads(DEFAULT_BLOSC_THREADS)

@@ -78,12 +73,12 @@ def __init__(self, path, cleaned=True, subsamples=False, convert_units=True, unp
             Will accept ``halo_info`` dirs or "redshift" dirs
             (e.g. ``z1.000/halo_info/`` or ``z1.000/``).

-            .. note::
+            .. note::
                 To load cleaned catalogs, you do *not* need to pass a different
-                argument to the ``path`` directory. Use ``cleaned=True`` instead
-                and the path to the cleaning info will be detected automatically
-                (or see ``cleandir``).
+                argument to the ``path`` directory. Use ``cleaned=True`` instead
+                and the path to the cleaning info will be detected automatically
+                (or see ``cleandir``).

         cleaned: bool, optional
             Loads the "cleaned" version of the halo catalogues. Always recommended.
@@ -167,7 +162,7 @@ def __init__(self, path, cleaned=True, subsamples=False, convert_units=True, unp
         # said `cleaned=True` or because this is a halo light cone catalog, which is already cleaned
         self.cleaned = cleaned

-        if halo_lc == None:
+        if halo_lc is None:
             halo_lc = self._is_path_halo_lc(path)
             if verbose and halo_lc:
                 print('Detected halo light cone catalog.')
@@ -377,7 +372,7 @@ def _setup_unpack_bits(self, unpack_bits):
             try:
                 for _f in unpack_bits:
                     assert _f in bitpacked.PID_FIELDS
-            except:
+            except Exception:
                 raise ValueError(f'`unpack_bits` must be True, False, or one of: "{bitpacked.PID_FIELDS}"')
         return unpack_bits

@@ -388,14 +383,14 @@ def _setup_load_subsamples(self, load_subsamples):
         Will be returned as lists of strings in `load_AB` and `load_pidrv`.
         `unpack_subsamples` is for pipelining, to keep things in rvint.
         '''
-        if load_subsamples == False:
+        if load_subsamples is False:
            # stub
            load_AB = []
            load_pidrv = []
            unpack_subsamples = True
         else:
            # If user has not specified which subsamples, then assume user wants to load everything
-            if load_subsamples == True:
+            if load_subsamples is True:
                load_subsamples = dict(A=True, B=True, rv=True, pid=True)

            if type(load_subsamples) == dict:
@@ -934,7 +929,7 @@ def _reindex_subsamples(self, RVorPID, N_halo_per_file, cleaned=True, halo_lc=Fa
         np_per_file = np.array(np_per_file)

         particle_dict = {'particle_AB_afs': particle_AB_afs,
-                            'np_per_file': np_per_file,
+                         'np_per_file': np_per_file,
                          'particle_AB_merge_afs': particle_AB_merge_afs,
                          'np_per_file_merge': np_per_file_merge,
                          'key_to_read': key_to_read}
@@ -1221,7 +1216,7 @@ def _unpack_euler16(bin_this):
     bin_this = bin_this - cap*(EULER_TBIN*EULER_TBIN)

     it = (np.floor(np.sqrt(bin_this))).astype(int)
-    its = np.sum(np.isnan(it))
+    # its = np.sum(np.isnan(it))


     ir = bin_this - it*it
@@ -1238,23 +1233,48 @@ def _unpack_euler16(bin_this):
     # and zz=1
     norm = 1.0/np.sqrt(1.0+xx*xx+yy*yy)
     zz = norm
-    yy *= norm; xx *= norm; # These are now a unit vector
+    yy *= norm
+    xx *= norm # These are now a unit vector

     # TODO: legacy code, rewrite
-    major[cap==0,0] = zz[cap==0]; major[cap==0,1] = yy[cap==0]; major[cap==0,2] = xx[cap==0];
-    major[cap==1,0] = zz[cap==1]; major[cap==1,1] =-yy[cap==1]; major[cap==1,2] = xx[cap==1];
-    major[cap==2,0] = zz[cap==2]; major[cap==2,1] = xx[cap==2]; major[cap==2,2] = yy[cap==2];
-    major[cap==3,0] = zz[cap==3]; major[cap==3,1] = xx[cap==3]; major[cap==3,2] =-yy[cap==3];
-
-    major[cap==4,1] = zz[cap==4]; major[cap==4,2] = yy[cap==4]; major[cap==4,0] = xx[cap==4];
-    major[cap==5,1] = zz[cap==5]; major[cap==5,2] =-yy[cap==5]; major[cap==5,0] = xx[cap==5];
-    major[cap==6,1] = zz[cap==6]; major[cap==6,2] = xx[cap==6]; major[cap==6,0] = yy[cap==6];
-    major[cap==7,1] = zz[cap==7]; major[cap==7,2] = xx[cap==7]; major[cap==7,0] =-yy[cap==7];
-
-    major[cap==8,2] = zz[cap==8]; major[cap==8,0] = yy[cap==8]; major[cap==8,1] = xx[cap==8];
-    major[cap==9,2] = zz[cap==9]; major[cap==9,0] =-yy[cap==9]; major[cap==9,1] = xx[cap==9];
-    major[cap==10,2] = zz[cap==10]; major[cap==10,0] = xx[cap==10]; major[cap==10,1] = yy[cap==10];
-    major[cap==11,2] = zz[cap==11]; major[cap==11,0] = xx[cap==11]; major[cap==11,1] =-yy[cap==11];
+    major[cap==0,0] = zz[cap==0]
+    major[cap==0,1] = yy[cap==0]
+    major[cap==0,2] = xx[cap==0]
+    major[cap==1,0] = zz[cap==1]
+    major[cap==1,1] =-yy[cap==1]
+    major[cap==1,2] = xx[cap==1]
+    major[cap==2,0] = zz[cap==2]
+    major[cap==2,1] = xx[cap==2]
+    major[cap==2,2] = yy[cap==2]
+    major[cap==3,0] = zz[cap==3]
+    major[cap==3,1] = xx[cap==3]
+    major[cap==3,2] =-yy[cap==3]
+
+    major[cap==4,1] = zz[cap==4]
+    major[cap==4,2] = yy[cap==4]
+    major[cap==4,0] = xx[cap==4]
+    major[cap==5,1] = zz[cap==5]
+    major[cap==5,2] =-yy[cap==5]
+    major[cap==5,0] = xx[cap==5]
+    major[cap==6,1] = zz[cap==6]
+    major[cap==6,2] = xx[cap==6]
+    major[cap==6,0] = yy[cap==6]
+    major[cap==7,1] = zz[cap==7]
+    major[cap==7,2] = xx[cap==7]
+    major[cap==7,0] =-yy[cap==7]
+
+    major[cap==8,2] = zz[cap==8]
+    major[cap==8,0] = yy[cap==8]
+    major[cap==8,1] = xx[cap==8]
+    major[cap==9,2] = zz[cap==9]
+    major[cap==9,0] =-yy[cap==9]
+    major[cap==9,1] = xx[cap==9]
+    major[cap==10,2] = zz[cap==10]
+    major[cap==10,0] = xx[cap==10]
+    major[cap==10,1] = yy[cap==10]
+    major[cap==11,2] = zz[cap==11]
+    major[cap==11,0] = xx[cap==11]
+    major[cap==11,1] =-yy[cap==11]

     # Next, we can get the minor axis
     az = (iaz+0.5)*(1.0/EULER_ABIN)*np.pi
@@ -1265,13 +1285,16 @@ def _unpack_euler16(bin_this):
     # are perpendicular.

     eq2 = (cap//4) == 2
-    minor[eq2,0] = xx[eq2]; minor[eq2,1] = yy[eq2];
+    minor[eq2,0] = xx[eq2]
+    minor[eq2,1] = yy[eq2]
     minor[eq2,2] = (minor[eq2,0]*major[eq2,0]+minor[eq2,1]*major[eq2,1])/(-major[eq2,2])
     eq4 = (cap//4) == 0
-    minor[eq4,1] = xx[eq4]; minor[eq4,2] = yy[eq4];
+    minor[eq4,1] = xx[eq4]
+    minor[eq4,2] = yy[eq4]
     minor[eq4,0] = (minor[eq4,1]*major[eq4,1]+minor[eq4,2]*major[eq4,2])/(-major[eq4,0])
     eq1 = (cap//4) == 1
-    minor[eq1,2] = xx[eq1]; minor[eq1,0] = yy[eq1];
+    minor[eq1,2] = xx[eq1]
+    minor[eq1,0] = yy[eq1]
     minor[eq1,1] = (minor[eq1,2]*major[eq1,2]+minor[eq1,0]*major[eq1,0])/(-major[eq1,1])
     minor *= (1./np.linalg.norm(minor,axis=1).reshape(N,1))
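The bare `except:` fixed in `_setup_unpack_bits` above is ruff rule E722. A minimal sketch (not from this codebase) of why the narrower form is preferred:

# Hedged sketch: a bare `except:` also catches KeyboardInterrupt and
# SystemExit, which should normally propagate; `except Exception:` does not.
def validate_fields(fields, allowed):
    try:
        for f in fields:
            assert f in allowed
    except Exception:  # a bare `except:` here would also swallow Ctrl-C
        raise ValueError(f'fields must be a subset of {allowed}')
    return fields

print(validate_fields(['pid'], ('pid', 'pos', 'vel')))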
4 changes: 2 additions & 2 deletions abacusnbody/data/pipe_asdf.py
@@ -83,7 +83,6 @@
 import gc
 import sys
 from os.path import isfile
-from os.path import join as pjoin
 from timeit import default_timer as timer

 import asdf
@@ -133,7 +132,8 @@ def unpack_to_pipe(asdf_fns, fields, data_key=DEFAULT_DATA_KEY, header_key=DEFAU
             arr = af[data_key][field][:] # read + decompression happens here
             read_time += timer() - read_start_time
             pipe.write(arr)
-            del arr; gc.collect()
+            del arr
+            gc.collect()
             nbytes_tot += N*field_width
     pipe.close() # signal EOF
     tot_time = timer() - start_time
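The split of `del arr; gc.collect()` above is ruff rule E702 (multiple statements on one line). A hedged sketch of the underlying memory pattern in a streaming loop like `unpack_to_pipe`:

# Hedged sketch, not the repo's code: drop each large buffer before the
# next read so peak memory stays near one buffer, then collect promptly.
import gc

def stream_buffers(buffers, write):
    for buf in buffers:
        write(buf)
        del buf       # one statement per line (E702)
        gc.collect()  # reclaim before the next large allocation

stream_buffers([bytearray(10**6) for _ in range(3)], lambda b: None)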
27 changes: 14 additions & 13 deletions abacusnbody/hod/GRAND_HOD.py
@@ -6,7 +6,7 @@
 import numpy as np
 from astropy.io import ascii
 from astropy.table import Table
-from numba import jit, njit, types
+from numba import njit, types
 from numba.typed import Dict

 # import yaml
@@ -133,20 +133,20 @@ def gen_cent(pos, vel, mass, ids, multis, randoms, vdev, deltac, fenv,

     # parse out the hod parameters
     logM_cut_L, logM1_L, sigma_L, alpha_L, kappa_L = \
-        LRG_design_array[0], LRG_design_array[1], LRG_design_array[2], LRG_design_array[3], LRG_design_array[4]
+        LRG_design_array[0], LRG_design_array[1], LRG_design_array[2], LRG_design_array[3], LRG_design_array[4] # noqa
     ic_L, alpha_c_L, Ac_L, Bc_L = LRG_decorations_array[10], LRG_decorations_array[0], \
         LRG_decorations_array[6], LRG_decorations_array[8]

     pmax_E, Q_E, logM_cut_E, kappa_E, sigma_E, logM1_E, alpha_E, gamma_E = \
         ELG_design_array[0], ELG_design_array[1], ELG_design_array[2], ELG_design_array[3], ELG_design_array[4],\
-        ELG_design_array[5], ELG_design_array[6], ELG_design_array[7]
+        ELG_design_array[5], ELG_design_array[6], ELG_design_array[7] # noqa
     alpha_c_E, Ac_E, Bc_E, ic_E = ELG_decorations_array[0], ELG_decorations_array[6], ELG_decorations_array[8],\
-        ELG_decorations_array[10]
+        ELG_decorations_array[10] # noqa

     logM_cut_Q, kappa_Q, sigma_Q, logM1_Q, alpha_Q = \
-        QSO_design_array[0], QSO_design_array[1], QSO_design_array[2], QSO_design_array[3], QSO_design_array[4]
+        QSO_design_array[0], QSO_design_array[1], QSO_design_array[2], QSO_design_array[3], QSO_design_array[4] # noqa
     alpha_c_Q, Ac_Q, Bc_Q, ic_Q = QSO_decorations_array[0], QSO_decorations_array[6], QSO_decorations_array[8],\
-        QSO_decorations_array[10]
+        QSO_decorations_array[10] # noqa

     H = len(mass)

@@ -171,7 +171,7 @@ def gen_cent(pos, vel, mass, ids, multis, randoms, vdev, deltac, fenv,
             ELG_marker += N_cen_ELG_v1(mass[i], pmax_E, Q_E, logM_cut_E_temp, sigma_E, gamma_E) * ic_E * multis[i]
         QSO_marker = ELG_marker
         if want_QSO:
-            logM_cut_Q_temp = logM_cut_Q + Ac_Q * deltac[i] + Bc_Q * fenv[i]
+            # logM_cut_Q_temp = logM_cut_Q + Ac_Q * deltac[i] + Bc_Q * fenv[i]
             QSO_marker += N_cen_QSO(mass[i], logM_cut_Q, sigma_Q) * ic_Q * multis[i]

         if randoms[i] <= LRG_marker:
@@ -291,7 +291,7 @@ def gen_cent(pos, vel, mass, ids, multis, randoms, vdev, deltac, fenv,
             qso_z[j3] = pos[i,2]
             qso_vz[j3] = vel[i,2] + alpha_c_Q * vdev[i] # velocity bias
             # rsd only applies to the z direction
-            if rsd and origin != None:
+            if rsd and origin is not None:
                 nx = qso_x[j3] - origin[0]
                 ny = qso_y[j3] - origin[1]
                 nz = qso_z[j3] - origin[2]
@@ -364,15 +364,15 @@ def gen_sats(ppos, pvel, hvel, hmass, hid, weights, randoms, hdeltac, hfenv,

     pmax_E, Q_E, logM_cut_E, kappa_E, sigma_E, logM1_E, alpha_E, gamma_E, A_E = \
         ELG_design_array[0], ELG_design_array[1], ELG_design_array[2], ELG_design_array[3], ELG_design_array[4],\
-        ELG_design_array[5], ELG_design_array[6], ELG_design_array[7], ELG_design_array[8]
+        ELG_design_array[5], ELG_design_array[6], ELG_design_array[7], ELG_design_array[8] # noqa
     alpha_s_E, s_E, s_v_E, s_p_E, s_r_E, Ac_E, As_E, Bc_E, Bs_E, ic_E, delta_M1, delta_alpha, alpha1, beta, conf_c = \
         ELG_decorations_array[1], ELG_decorations_array[2], ELG_decorations_array[3], ELG_decorations_array[4], \
         ELG_decorations_array[5], ELG_decorations_array[6], ELG_decorations_array[7], ELG_decorations_array[8], \
         ELG_decorations_array[9], ELG_decorations_array[10], ELG_decorations_array[11], ELG_decorations_array[12], \
         ELG_decorations_array[13], ELG_decorations_array[14], ELG_decorations_array[15]

     logM_cut_Q, kappa_Q, sigma_Q, logM1_Q, alpha_Q = \
-        QSO_design_array[0], QSO_design_array[1], QSO_design_array[2], QSO_design_array[3], QSO_design_array[4]
+        QSO_design_array[0], QSO_design_array[1], QSO_design_array[2], QSO_design_array[3], QSO_design_array[4] # noqa
     alpha_s_Q, s_Q, s_v_Q, s_p_Q, s_r_Q, Ac_Q, As_Q, Bc_Q, Bs_Q, ic_Q = \
         QSO_decorations_array[1], QSO_decorations_array[2], QSO_decorations_array[3], QSO_decorations_array[4], \
         QSO_decorations_array[5], QSO_decorations_array[6], QSO_decorations_array[7], QSO_decorations_array[8], \
@@ -554,7 +554,7 @@ def gen_sats(ppos, pvel, hvel, hmass, hid, weights, randoms, hdeltac, hfenv,
             qso_vy[j3] = hvel[i, 1] + alpha_s_Q * (pvel[i, 1] - hvel[i, 1]) # velocity bias
             qso_z[j3] = ppos[i, 2]
             qso_vz[j3] = hvel[i, 2] + alpha_s_Q * (pvel[i, 2] - hvel[i, 2]) # velocity bias
-            if rsd and origin != None:
+            if rsd and origin is not None:
                 nx = qso_x[j3] - origin[0]
                 ny = qso_y[j3] - origin[1]
                 nz = qso_z[j3] - origin[2]
@@ -907,7 +907,7 @@ def gen_gal_cat(halo_data, particle_data, tracers, params, Nthread = 16,
     """

-    if not type(rsd) is bool:
+    if type(rsd) is not bool:
         raise ValueError("Error: rsd has to be a boolean")

     # find the halos, populate them with galaxies and write them to files
@@ -938,7 +938,8 @@ def gen_gal_cat(halo_data, particle_data, tracers, params, Nthread = 16,
         os.makedirs(outdir, exist_ok = True)

         # save to file
-        outdict = HOD_dict[tracer].pop('Ncent', None)
+        # outdict =
+        HOD_dict[tracer].pop('Ncent', None)
         table = Table(HOD_dict[tracer], meta = {'Ncent': Ncent, 'Gal_type': tracer, **tracers[tracer]})
         if params['chunk'] == -1:
             ascii.write(table, outdir / (f"{tracer}s.dat"), overwrite = True, format = 'ecsv')
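The `not type(rsd) is bool` → `type(rsd) is not bool` rewrite in `gen_gal_cat` is ruff rule E714: both parse to the same test, but the latter reads as a single operator. A small illustration (not from the repository):

# Hedged illustration: `not x is y` means `not (x is y)`, which is
# identical to `x is not y`, the idiomatic spelling.
def check_rsd(rsd):
    if type(rsd) is not bool:
        raise ValueError('Error: rsd has to be a boolean')

check_rsd(True)       # passes silently
try:
    check_rsd('yes')  # a string, not a bool
except ValueError as e:
    print(e)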
24 changes: 12 additions & 12 deletions abacusnbody/hod/prepare_sim.py
@@ -18,10 +18,7 @@
 import numba
 import numpy as np
 import yaml
-from astropy.table import Table
-from numba import jit, njit, types
-from scipy.interpolate import NearestNDInterpolator
-from scipy.ndimage import gaussian_filter
+from numba import njit
 from scipy.spatial import cKDTree

 from abacusnbody.data.compaso_halo_catalog import CompaSOHaloCatalog
@@ -202,7 +199,7 @@ def concat_to_arr(lists, dtype=np.int64):
     '''
     starts = np.empty(len(lists) + 1, dtype=np.int64)
     starts[0] = 0
-    starts[1:] = np.cumsum(np.fromiter((len(l) for l in lists), count=len(lists), dtype=np.int64))
+    starts[1:] = np.cumsum(np.fromiter((len(ell) for ell in lists), count=len(lists), dtype=np.int64))
     N = starts[-1]
     res = np.fromiter(itertools.chain.from_iterable(lists), count=N, dtype=dtype)
     return res, starts
@@ -257,15 +254,18 @@ def do_Menv_from_tree(allpos, allmasses, r_inner, r_outer, halo_lc, Lbox, nthrea
         r_inner = np.array(r_inner)[mmask]
     allinds_inner = querypos_tree.query_ball_point(pos_cut, r = r_inner, workers = nthread)
     inner_arr, inner_starts = concat_to_arr(allinds_inner) # 7 sec
-    del allinds_inner; gc.collect()
+    del allinds_inner
+    gc.collect()

     if isinstance(r_outer, (list, tuple, np.ndarray)):
         r_outer = np.array(r_outer)[mmask]
     allinds_outer = querypos_tree.query_ball_point(pos_cut, r = r_outer, workers = nthread)
-    del querypos, querypos_tree; gc.collect()
+    del querypos, querypos_tree
+    gc.collect()

     outer_arr, outer_starts = concat_to_arr(allinds_outer)
-    del allinds_outer; gc.collect()
+    del allinds_outer
+    gc.collect()

     print("starting Menv")
     numba.set_num_threads(nthread)
@@ -474,8 +474,8 @@ def prepare_slab(i, savedir, simdir, simname, z_mock, tracer_flags, MT, want_ran
     ranksr_parts = np.full(len_old, -1.0)
     ranksp_parts = np.full(len_old, -1.0)
     ranksc_parts = np.full(len_old, -1.0)
-    pos_parts = np.full((len_old, 3), -1.0)
-    vel_parts = np.full((len_old, 3), -1.0)
+    # pos_parts = np.full((len_old, 3), -1.0)
+    # vel_parts = np.full((len_old, 3), -1.0)
     hvel_parts = np.full((len_old, 3), -1.0)
     Mh_parts = np.full(len_old, -1.0)
     Np_parts = np.full(len_old, -1.0)
@@ -606,7 +606,7 @@ def prepare_slab(i, savedir, simdir, simname, z_mock, tracer_flags, MT, want_ran
     if os.path.exists(outfilename_halos):
         os.remove(outfilename_halos)
     newfile = h5py.File(outfilename_halos, 'w')
-    dataset = newfile.create_dataset('halos', data = halos[mask_halos])
+    newfile.create_dataset('halos', data = halos[mask_halos])
     newfile.close()

     # output the new particle file
@@ -636,7 +636,7 @@ def prepare_slab(i, savedir, simdir, simname, z_mock, tracer_flags, MT, want_ran
     if os.path.exists(outfilename_particles):
         os.remove(outfilename_particles)
     newfile = h5py.File(outfilename_particles, 'w')
-    dataset = newfile.create_dataset('particles', data = parts)
+    newfile.create_dataset('particles', data = parts)
     newfile.close()

     print("pre process particle number ", len_old, " post process particle number ", len(parts))
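The `len(l)` → `len(ell)` rename in `concat_to_arr` above is ruff rule E741 (ambiguous single-letter names `l`, `I`, `O`). A brief sketch of the same offsets construct (hypothetical data, not from the repository):

# Hedged sketch: `l` is easily confused with `1` in many fonts; the rename
# changes nothing about behavior.
import numpy as np

lists = [[1, 2], [3], [4, 5, 6]]
lengths = np.fromiter((len(ell) for ell in lists), count=len(lists), dtype=np.int64)
starts = np.empty(len(lists) + 1, dtype=np.int64)
starts[0] = 0
starts[1:] = np.cumsum(lengths)  # offset of each list in the concatenation
print(starts)  # [0 2 3 6]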
(Diffs for the remaining 14 changed files are not shown here.)
