Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

FIX: Replace deprecated np.int instances #558

Merged
merged 21 commits
Apr 27, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions qsiprep/cli/recon_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def recon_plot():
raise Exception('Requires either a mif file or fib file')

odf_4d = odf_img.get_fdata()
sphere = HemiSphere(xyz=directions.astype(np.float))
sphere = HemiSphere(xyz=directions.astype(float))
if not opts.background_image:
background_data = odf_4d.mean(3)
else:
Expand Down Expand Up @@ -381,4 +381,4 @@ def odf_roi_plot(odf_4d, halfsphere, background_data, out_file, roi_file,
mask_image=image_mask)
roi_image.paste(Image.open(cc_file), (2 * tile_size, 0))

roi_image.save(out_file)
roi_image.save(out_file)
10 changes: 5 additions & 5 deletions qsiprep/interfaces/anatomical.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def _run_interface(self, runtime):
orig_mask = img.get_fdata() > 0
eroded1 = ndimage.binary_erosion(orig_mask, iterations=3)
eroded2 = ndimage.binary_erosion(eroded1, iterations=3)
final = orig_mask.astype(np.int) + eroded1 + eroded2
final = orig_mask.astype(int) + eroded1 + eroded2
out_img = nb.Nifti1Image(final, img.affine, header=img.header)
out_fname = fname_presuffix(self.inputs.mask_file, suffix="_dseg",
newpath=runtime.cwd)
Expand Down Expand Up @@ -336,7 +336,7 @@ def _run_interface(self, runtime):

actual_brain_to_skull_ratio = brain_median / nonbrain_head_median
LOGGER.info("found brain to skull ratio: %.3f", actual_brain_to_skull_ratio)
desat_data = skulled_img.get_fdata(dtype=np.float32).copy()
desat_data = skulled_img.get_fdata(dtype='float32').copy()
adjustment = 1.
if actual_brain_to_skull_ratio < self.inputs.brain_to_skull_ratio:
# We need to downweight the non-brain voxels
Expand All @@ -347,7 +347,7 @@ def _run_interface(self, runtime):
desat_data[nonbrain_mask] = desat_data[nonbrain_mask] * adjustment

desat_img = nim.new_img_like(skulled_img, desat_data, copy_header=True)
desat_img.header.set_data_dtype(np.float32)
desat_img.header.set_data_dtype('float32')
desat_img.to_filename(out_file)
self._results['desaturated_t2w'] = out_file
self._results['head_scaling_factor'] = adjustment
Expand Down Expand Up @@ -415,7 +415,6 @@ def _run_interface(self, runtime):
self._results['out_file'] = out_file
return runtime


class _GetTemplateInputSpec(BaseInterfaceInputSpec):
template_name = traits.Str(
'MNI152NLin2009cAsym',
Expand Down Expand Up @@ -470,4 +469,5 @@ def _run_interface(self, runtime):
else:
raise NotImplementedError("Arbitrary templates not available yet")

return runtime
return runtime

18 changes: 9 additions & 9 deletions qsiprep/interfaces/converters.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def _run_interface(self, runtime):
else:
ampl_data = amplitudes_img.get_fdata()
ampl_mask = ampl_data.sum(3) > 1e-6
mask_img = nb.Nifti1Image(ampl_mask.astype(np.float),
mask_img = nb.Nifti1Image(ampl_mask.astype(float),
amplitudes_img.affine)

self._results['fib_file'] = output_fib_file
Expand Down Expand Up @@ -216,7 +216,7 @@ def amplitudes_to_fibgz(amplitudes_img, odf_dirs, odf_faces, output_file,
z0 = np.nanmax(masked_odfs)
masked_odfs = masked_odfs / z0
masked_odfs[masked_odfs < 0] = 0
masked_odfs = np.nan_to_num(masked_odfs).astype(np.float)
masked_odfs = np.nan_to_num(masked_odfs).astype(float)

if unit_odf:
sums = masked_odfs.sum(1)
Expand Down Expand Up @@ -247,19 +247,19 @@ def amplitudes_to_fibgz(amplitudes_img, odf_dirs, odf_faces, output_file,
# fill in the "fa" values
fa_n = np.zeros(n_voxels)
fa_n[flat_mask] = peak_vals[:, nfib]
dsi_mat['fa%d' % nfib] = fa_n.astype(np.float32)
dsi_mat['fa%d' % nfib] = fa_n.astype('float32')

# Fill in the index values
index_n = np.zeros(n_voxels)
index_n[flat_mask] = peak_indices[:, nfib]
dsi_mat['index%d' % nfib] = index_n.astype(np.int16)
dsi_mat['index%d' % nfib] = index_n.astype('int16')

# Add in the ODFs
num_odf_matrices = n_odfs // ODF_COLS
split_indices = (np.arange(num_odf_matrices) + 1) * ODF_COLS
odf_splits = np.array_split(masked_odfs, split_indices, axis=0)
for splitnum, odfs in enumerate(odf_splits):
dsi_mat['odf%d' % splitnum] = odfs.T.astype(np.float32)
dsi_mat['odf%d' % splitnum] = odfs.T.astype('float32')

dsi_mat['odf_vertices'] = odf_dirs.T
dsi_mat['odf_faces'] = odf_faces.T
Expand Down Expand Up @@ -340,7 +340,7 @@ def amico_directions_to_fibgz(directions_img, od_img, icvf_img, isovf_img,
# fill in the "dir" values
dir0 = np.zeros(n_voxels)
dir0[flat_mask] = peak_indices
dsi_mat['index0'] = dir0.astype(np.int16)
dsi_mat['index0'] = dir0.astype('int16')
dsi_mat['fa0'] = icvf_vec
dsi_mat['ICVF0'] = icvf_vec
dsi_mat['ISOVF0'] = isovf_vec
Expand Down Expand Up @@ -454,7 +454,7 @@ def is_exe(fpath):

def fib2amps(fib_file, ref_image, subtract_iso=True):
fibmat = fast_load_fibgz(fib_file)
dims = tuple(fibmat['dimension'].squeeze().astype(np.int))
dims = tuple(fibmat['dimension'].squeeze().astype(int))
directions = fibmat['odf_vertices'].T

odf_vars = [k for k in fibmat.keys() if re.match("odf\\d+", k)]
Expand All @@ -477,7 +477,7 @@ def fib2amps(fib_file, ref_image, subtract_iso=True):
# Convert each column to a 3d file, then concatenate them
odfs_3d = []
for odf_vals in odf_array.T:
new_data = np.zeros(n_voxels, dtype=np.float32)
new_data = np.zeros(n_voxels, dtype='float32')
new_data[flat_mask] = odf_vals
odfs_3d.append(new_data.reshape(dims, order="F"))

Expand All @@ -495,7 +495,7 @@ def peaks_to_odfs(fibdict):
flat_mask = fibdict['fa0'].squeeze().ravel(order='F') > 0
num_directions = fibdict['odf_vertices'].shape[1]
num_odfs = flat_mask.sum()
odfs = np.zeros((num_odfs, num_directions), dtype=np.float32)
odfs = np.zeros((num_odfs, num_directions), dtype='float32')
row_indices = np.arange(num_odfs)
for peak_num in range(num_indexes):
fa_values = fibdict[
Expand Down
10 changes: 5 additions & 5 deletions qsiprep/interfaces/dipy.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,7 @@ def _get_mask(self, amplitudes_img, gtab):

# Needed for synthetic data
mask_array = mask_array * (dwi_data.sum(3) > 0)
mask_img = nb.Nifti1Image(mask_array.astype(np.float32), amplitudes_img.affine,
mask_img = nb.Nifti1Image(mask_array.astype('float32'), amplitudes_img.affine,
amplitudes_img.header)
else:
mask_img = nb.load(self.inputs.mask_file)
Expand Down Expand Up @@ -403,7 +403,7 @@ class MAPMRIReconstruction(DipyReconInterface):
def _run_interface(self, runtime):
gtab = self._get_gtab()
dwi_img = nb.load(self.inputs.dwi_file)
data = dwi_img.get_fdata(dtype=np.float32)
data = dwi_img.get_fdata(dtype='float32')
mask_img, mask_array = self._get_mask(dwi_img, gtab)
weighting = "GCV" if self.inputs.laplacian_weighting == "GCV" else \
self.inputs.laplacian_weighting
Expand Down Expand Up @@ -549,7 +549,7 @@ def _run_interface(self, runtime):
b0s_mask = gtab.b0s_mask
dwis_mask = np.logical_not(b0s_mask)
dwi_img = nb.load(self.inputs.dwi_file)
dwi_data = dwi_img.get_fdata(dtype=np.float32)
dwi_data = dwi_img.get_fdata(dtype='float32')
b0_images = dwi_data[..., b0s_mask]
b0_mean = b0_images.mean(3)
dwi_images = dwi_data[..., dwis_mask]
Expand Down Expand Up @@ -652,7 +652,7 @@ class TensorReconstruction(DipyReconInterface):
def _run_interface(self, runtime):
gtab = self._get_gtab()
dwi_img = nb.load(self.inputs.dwi_file)
dwi_data = dwi_img.get_fdata(dtype=np.float32)
dwi_data = dwi_img.get_fdata(dtype='float32')
mask_img, mask_array = self._get_mask(dwi_img, gtab)

# Fit it
Expand Down Expand Up @@ -708,7 +708,7 @@ class KurtosisReconstruction(DipyReconInterface):
def _run_interface(self, runtime):
gtab = self._get_gtab()
dwi_img = nb.load(self.inputs.dwi_file)
dwi_data = dwi_img.get_fdata(dtype=np.float32)
dwi_data = dwi_img.get_fdata(dtype='float32')
mask_img, mask_array = self._get_mask(dwi_img, gtab)

# Fit it
Expand Down
4 changes: 2 additions & 2 deletions qsiprep/interfaces/dsi_studio.py
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,7 @@ def _post_run_hook(self, runtime):
atlas_name = self.inputs.atlas_name

# Aggregate the connectivity/network data from DSI Studio
official_labels = np.array(atlas_config['node_ids']).astype(np.int)
official_labels = np.array(atlas_config['node_ids']).astype(int)
connectivity_data = {
atlas_name + "_region_ids": official_labels,
atlas_name + "_region_labels": np.array(atlas_config['node_names'])
Expand Down Expand Up @@ -533,7 +533,7 @@ def _sanitized_network_measures(network_txt, official_labels, atlas_name, measur
n_atlas_labels = len(official_labels)
network_data = _parse_network_file(network_txt)
# Make sure to get the full atlas
network_region_ids = np.array(network_data['region_ids']).astype(np.int)
network_region_ids = np.array(network_data['region_ids']).astype(int)
# If all the regions are found
in_this_mask = np.isin(official_labels, network_region_ids)
if np.all(in_this_mask):
Expand Down
2 changes: 1 addition & 1 deletion qsiprep/interfaces/epi_fmap.py
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,7 @@ def split_into_b0s_and_origins(b0_threshold, original_files, img_file, cwd,
else:
# Assume they're all b=0
b0_indices = np.array([0]) if full_img.ndim < 4 else \
np.arange(full_img.shape[3], dtype=np.int)
np.arange(full_img.shape[3], dtype=int)

relative_indices = relative_b0_index(b0_indices, original_files)

Expand Down
2 changes: 1 addition & 1 deletion qsiprep/interfaces/fmap.py
Original file line number Diff line number Diff line change
Expand Up @@ -935,7 +935,7 @@ def get_evenly_spaced_b0s(b0_indices, max_per_spec):
if len(b0_indices) <= max_per_spec:
return b0_indices
selected_indices = np.linspace(0, len(b0_indices)-1, num=max_per_spec,
endpoint=True, dtype=np.int)
endpoint=True, dtype=int)
return [b0_indices[idx] for idx in selected_indices]


Expand Down
6 changes: 3 additions & 3 deletions qsiprep/interfaces/gradients.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def _run_interface(self, runtime):
mask_img = nb.load(self.inputs.mask_image)
mask = mask_img.get_fdata() > 0
masked_slices = (mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :]
).astype(np.int)
).astype(int)
slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True)
min_size = np.percentile(slice_counts, self.inputs.min_slice_size_percentile)
too_small = slice_nums[slice_counts < min_size]
Expand Down Expand Up @@ -295,7 +295,7 @@ def _run_interface(self, runtime):
output_mean_fname = fname_presuffix(output_fname, suffix='_mean',
use_ext=True, newpath=runtime.cwd)
if isdefined(self.inputs.b0_indices):
indices = np.array(self.inputs.b0_indices).astype(np.int)
indices = np.array(self.inputs.b0_indices).astype(int)
elif isdefined(self.inputs.bval_file):
bvals = np.loadtxt(self.inputs.bval_file)
indices = np.flatnonzero(bvals < self.inputs.b0_threshold)
Expand Down Expand Up @@ -703,7 +703,7 @@ def concatenate_bvecs(input_files):
else:
collected_vecs = []
for bvec_file in input_files:
collected_vecs.append(np.loadtxt(bvec_file).astype(np.float))
collected_vecs.append(np.loadtxt(bvec_file).astype(float))
stacked = np.row_stack(collected_vecs)
if not stacked.shape[1] == 3:
stacked = stacked.T
Expand Down
4 changes: 2 additions & 2 deletions qsiprep/interfaces/mrtrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -804,7 +804,7 @@ def _post_run_hook(self, runtime):
atlas_name = self.inputs.atlas_name

# Aggregate the connectivity/network data from DSI Studio
official_labels = np.array(atlas_config['node_ids']).astype(np.int)
official_labels = np.array(atlas_config['node_ids']).astype(int)
connectivity_data = {
atlas_name + "_region_ids": official_labels,
atlas_name + "_region_labels": np.array(atlas_config['node_names'])
Expand Down Expand Up @@ -1352,4 +1352,4 @@ class _TransformHeaderOutputSpec(TraitedSpec):
class TransformHeader(CommandLine):
input_spec = _TransformHeaderInputSpec
output_spec = _TransformHeaderOutputSpec
_cmd = "mrtransform -strides -1,-2,3"
_cmd = "mrtransform -strides -1,-2,3"
12 changes: 6 additions & 6 deletions qsiprep/interfaces/nilearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,9 +203,9 @@ def _run_interface(self, runtime):
self._results['plotting_mask_file'] = plotting_mask

# Make a smoothed mask for N4
dilated_mask = ndimage.binary_dilation(mask_img.get_fdata().astype(np.int),
dilated_mask = ndimage.binary_dilation(mask_img.get_fdata().astype(int),
structure=sim.cube(3))
smoothed_dilated_mask = ndimage.gaussian_filter(dilated_mask.astype(np.float), sigma=3)
smoothed_dilated_mask = ndimage.gaussian_filter(dilated_mask.astype(float), sigma=3)
weight_img = new_img_like(input_img, smoothed_dilated_mask)

# Find a bias field and correct the original input
Expand Down Expand Up @@ -433,7 +433,7 @@ def calculate_gradmax_b0_mask(b0_nii, show_plot=False, quantile_max=0.8, pad_siz

# Send it out for post processing
edge_scores = []
opening_values = np.array([2, 4, 6, 8, 10, 12], dtype=np.int)
opening_values = np.array([2, 4, 6, 8, 10, 12], dtype=int)
opened_masks = []
selected_voxels = []
for opening_test in opening_values:
Expand Down Expand Up @@ -526,7 +526,7 @@ def watershed_refined_b0_mask(b0_nii, show_plot=False, pad_size=10, quantile_max
ribbon_mask = (dilated_mask ^ eroded_mask)

# Down-weight data deep in the mask
inner_weights = ndimage.gaussian_filter(eroded_mask.astype(np.float), sigma=morph_size)
inner_weights = ndimage.gaussian_filter(eroded_mask.astype(float), sigma=morph_size)
inner_weights = 1. - inner_weights / inner_weights.max()

# Down-weight data as it gets far from the mask
Expand Down Expand Up @@ -575,8 +575,8 @@ def select_markers_for_rw(image, inner_mask, empty_mask, outer_mask,
markers = np.zeros_like(image) - 1.
use_as_inner_marker = np.random.rand(inner_mask.sum()) < sample_proportion
use_as_outer_marker = np.random.rand(outer_mask.sum()) < sample_proportion
markers[inner_mask > 0] = use_as_inner_marker.astype(np.int) * 2
markers[outer_mask > 0] = use_as_outer_marker.astype(np.int) * 1
markers[inner_mask > 0] = use_as_inner_marker.astype(int) * 2
markers[outer_mask > 0] = use_as_outer_marker.astype(int) * 1
markers[empty_mask > 0] = 0

return markers
2 changes: 1 addition & 1 deletion qsiprep/interfaces/niworkflows.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def __init__(self, sliceqc_file, mask_file, confounds, usecols=None, units=None,
mask_img = nb.load(mask_file)
mask = mask_img.get_fdata() > 0
masked_slices = (mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :]
).astype(np.int)
).astype(int)
slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True)
self.qc_data = {
'slice_scores': slice_scores,
Expand Down
2 changes: 1 addition & 1 deletion qsiprep/interfaces/patch2self.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ def patch2self(data, bvals, patch_radius=[0, 0, 0], model='ols',
Denoising Diffusion MRI with Self-supervised Learning,
Advances in Neural Information Processing Systems 33 (2020)
"""
patch_radius = np.asarray(patch_radius, dtype=np.int)
patch_radius = np.asarray(patch_radius, dtype=int)

if not data.ndim == 4:
raise ValueError("Patch2Self can only denoise on 4D arrays.",
Expand Down
12 changes: 6 additions & 6 deletions qsiprep/interfaces/tortoise.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,10 +170,10 @@ def _run_interface(self, runtime):
down_row = selected_images.loc[1]
up_img = to_lps(
safe_get_3d_image(up_row.bids_origin_file, up_row.original_volume))
up_img.set_data_dtype(np.float32)
up_img.set_data_dtype('float32')
down_img = to_lps(
safe_get_3d_image(down_row.bids_origin_file, down_row.original_volume))
down_img.set_data_dtype(np.float32)
down_img.set_data_dtype('float32')

# Save the images
blip_up_nii = op.join(runtime.cwd, "blip_up_b0.nii")
Expand Down Expand Up @@ -605,15 +605,15 @@ def split_into_up_and_down_niis(dwi_files, bval_files, bvec_files, original_imag
return blip_assignments

# Write the 4d up image
up_4d = nim.concat_imgs(up_images, dtype=np.float32, auto_resample=False)
up_4d.set_data_dtype(np.float32)
up_4d = nim.concat_imgs(up_images, dtype='float32', auto_resample=False)
up_4d.set_data_dtype('float32')
up_4d.to_filename(up_dwi_file)
up_bval_file, up_bvec_file = write_concatenated_fsl_gradients(
up_bvals, up_bvecs, up_prefix)

# Write the 4d down image
down_4d = nim.concat_imgs(down_images, dtype=np.float32, auto_resample=False)
down_4d.set_data_dtype(np.float32)
down_4d = nim.concat_imgs(down_images, dtype='float32', auto_resample=False)
down_4d.set_data_dtype('float32')
down_4d.to_filename(down_dwi_file)
down_bval_file, down_bvec_file = write_concatenated_fsl_gradients(
down_bvals, down_bvecs, down_prefix)
Expand Down
2 changes: 1 addition & 1 deletion qsiprep/interfaces/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ def _run_interface(self, runtime):
out_file = fname_presuffix(first_fname, suffix='_tpmsum',
newpath=runtime.cwd)
newnii = im.__class__(data, im.affine, im.header)
newnii.set_data_dtype(np.float32)
newnii.set_data_dtype('float32')

# Set visualization thresholds
newnii.header['cal_max'] = 1.0
Expand Down
4 changes: 2 additions & 2 deletions qsiprep/utils/brainsuite_shore.py
Original file line number Diff line number Diff line change
Expand Up @@ -622,7 +622,7 @@ def create_rspace(gridsize, radius_max):
for k in range(-radius, radius + 1):
vecs.append([i, j, k])

vecs = np.array(vecs, dtype=np.float32)
vecs = np.array(vecs, dtype='float32')
tab = vecs / radius
tab = tab * radius_max
vecs = vecs + radius
Expand All @@ -636,7 +636,7 @@ def shore_index_matrix(radial_order):
for l in range(0, n + 1, 2):
for m in range(-l, l + 1):
indices.append((n, l, m))
return np.array(indices).astype(np.int)
return np.array(indices).astype(int)


def shore_matrix_pdf(radial_order, zeta, rtab):
Expand Down