From 231293eac2bff39a73016d172ebf5f2961347b3e Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 9 Oct 2021 17:25:23 +0800 Subject: [PATCH 1/7] [DLMED] update intensity transforms Signed-off-by: Nic Ma --- monai/transforms/intensity/array.py | 99 +++++++----- monai/transforms/intensity/dictionary.py | 148 ++++++++---------- tests/test_rand_adjust_contrastd.py | 2 +- ..._bias_field.py => test_rand_bias_field.py} | 11 +- ...ias_fieldd.py => test_rand_bias_fieldd.py} | 8 +- tests/test_rand_gaussian_noised.py | 3 +- tests/test_rand_scale_intensity.py | 2 + tests/test_rand_scale_intensityd.py | 2 + tests/test_rand_shift_intensity.py | 2 + tests/test_rand_shift_intensityd.py | 4 + tests/test_rand_std_shift_intensity.py | 2 + tests/test_rand_std_shift_intensityd.py | 2 + 12 files changed, 151 insertions(+), 134 deletions(-) rename tests/{test_random_bias_field.py => test_rand_bias_field.py} (87%) rename tests/{test_random_bias_fieldd.py => test_rand_bias_fieldd.py} (91%) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 376c2c811f..9792d2fc86 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -99,8 +99,8 @@ def randomize(self, data: Any) -> None: super().randomize(None) self._rand_std = self.R.uniform(0, self.std) - def _add_noise(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - noise = self.R.normal(self.mean, self._rand_std, size=img.shape) + def compute(self, img: NdarrayOrTensor, mean: float = 0.0) -> NdarrayOrTensor: + noise = self.R.normal(mean, self._rand_std, size=img.shape) noise_, *_ = convert_to_dst_type(noise, img) return img + noise_ @@ -111,7 +111,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: self.randomize(None) if not self._do_transform: return img - return self._add_noise(img) + return self.compute(img=img, mean=self.mean) class RandRicianNoise(RandomizableTransform): @@ -171,13 +171,7 @@ def _add_noise(self, img: NdarrayTensor, mean: float, std: 
float): return np.sqrt((img + self._noise1) ** 2 + self._noise2 ** 2) - def __call__(self, img: NdarrayTensor) -> NdarrayTensor: - """ - Apply the transform to `img`. - """ - super().randomize(None) - if not self._do_transform: - return img + def compute(self, img: NdarrayTensor) -> NdarrayTensor: if self.channel_wise: _mean = ensure_tuple_rep(self.mean, len(img)) _std = ensure_tuple_rep(self.std, len(img)) @@ -194,6 +188,16 @@ def __call__(self, img: NdarrayTensor) -> NdarrayTensor: img = self._add_noise(img, mean=self.mean, std=std) return img + def __call__(self, img: NdarrayTensor) -> NdarrayTensor: + """ + Apply the transform to `img`. + """ + super().randomize(None) + if self._do_transform: + img = self.compute(img=img) + + return img + class ShiftIntensity(Transform): """ @@ -245,8 +249,11 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 self._shfiter = ShiftIntensity(self._offset) def randomize(self, data: Optional[Any] = None) -> None: - self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) super().randomize(None) + self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) + + def compute(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> NdarrayOrTensor: + return self._shfiter(img, self._offset if factor is None else self._offset * factor) def __call__(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> NdarrayOrTensor: """ @@ -259,9 +266,10 @@ def __call__(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> Ndar """ self.randomize() - if not self._do_transform: - return img - return self._shfiter(img, self._offset if factor is None else self._offset * factor) + if self._do_transform: + img = self.compute(img=img, factor=factor) + + return img class StdShiftIntensity(Transform): @@ -357,20 +365,24 @@ def __init__( self.dtype = dtype def randomize(self, data: Optional[Any] = None) -> None: - self.factor = self.R.uniform(low=self.factors[0], 
high=self.factors[1]) super().randomize(None) + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + + def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + shifter = StdShiftIntensity( + factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype + ) + return shifter(img) def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: """ Apply the transform to `img`. """ self.randomize() - if not self._do_transform: - return img - shifter = StdShiftIntensity( - factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype - ) - return shifter(img) + if self._do_transform: + img = self.compute(img=img) + + return img class ScaleIntensity(Transform): @@ -459,18 +471,22 @@ def __init__( self.dtype = dtype def randomize(self, data: Optional[Any] = None) -> None: - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) super().randomize(None) + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + + def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype) + return scaler(img) def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: """ Apply the transform to `img`. 
""" self.randomize() - if not self._do_transform: - return img - scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype) - return scaler(img) + if self._do_transform: + img = self.compute(img=img) + + return img class RandBiasField(RandomizableTransform): @@ -498,7 +514,7 @@ def __init__( degree: int = 3, coeff_range: Tuple[float, float] = (0.0, 0.1), dtype: DtypeLike = np.float32, - prob: float = 1.0, + prob: float = 0.1, ) -> None: RandomizableTransform.__init__(self, prob) if degree < 1: @@ -537,14 +553,8 @@ def randomize(self, data: np.ndarray) -> None: n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)])) self._coeff = self.R.uniform(*self.coeff_range, n_coeff).tolist() - def __call__(self, img: np.ndarray): - """ - Apply the transform to `img`. - """ + def compute(self, img: np.ndarray): img, *_ = convert_data_type(img, np.ndarray) # type: ignore - self.randomize(data=img) - if not self._do_transform: - return img num_channels, *spatial_shape = img.shape _bias_fields = np.stack( [ @@ -555,6 +565,16 @@ def __call__(self, img: np.ndarray): ) return (img * np.exp(_bias_fields)).astype(self.dtype) + def __call__(self, img: np.ndarray): + """ + Apply the transform to `img`. + """ + self.randomize(data=img) + if self._do_transform: + img = self.compute(img=img) + + return img + class NormalizeIntensity(Transform): """ @@ -785,6 +805,9 @@ def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) + def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + return AdjustContrast(self.gamma_value)(img) + def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: """ Apply the transform to `img`. 
@@ -792,10 +815,10 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: self.randomize() if self.gamma_value is None: raise ValueError("gamma_value is not set.") - if not self._do_transform: - return img - adjuster = AdjustContrast(self.gamma_value) - return adjuster(img) + if self._do_transform: + img = self.compute(img=img) + + return img class ScaleIntensityRangePercentiles(Transform): diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 8681093168..e765608395 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -16,7 +16,7 @@ """ from collections.abc import Iterable -from typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -32,12 +32,16 @@ KSpaceSpikeNoise, MaskIntensity, NormalizeIntensity, + RandAdjustContrast, RandBiasField, RandCoarseDropout, RandCoarseShuffle, RandGaussianNoise, RandKSpaceSpikeNoise, RandRicianNoise, + RandScaleIntensity, + RandShiftIntensity, + RandStdShiftIntensity, ScaleIntensity, ScaleIntensityRange, ScaleIntensityRangePercentiles, @@ -47,7 +51,7 @@ ) from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform from monai.transforms.utils import is_positive -from monai.utils import convert_to_dst_type, ensure_tuple, ensure_tuple_rep, ensure_tuple_size +from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size from monai.utils.deprecated import deprecated_arg from monai.utils.enums import TransformBackends from monai.utils.type_conversion import convert_data_type @@ -166,22 +170,21 @@ def __init__( MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) self.mean = ensure_tuple_rep(mean, len(self.keys)) - self.std = std - self._noise: List[np.ndarray] = [] + 
self.rand_gaussian_noise = RandGaussianNoise(std=std) - def _add_noise(self, img: NdarrayTensor, mean: float) -> NdarrayTensor: - noise = self.R.normal(mean, self.R.uniform(0, self.std), size=img.shape) - noise_, *_ = convert_to_dst_type(noise, img) - return img + noise_ + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.rand_gaussian_noise.set_random_state(seed, state) - def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: + def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) super().randomize(None) if not self._do_transform: return d for key, mean in self.key_iterator(d, self.mean): - d[key] = self._add_noise(img=d[key], mean=mean) + self.rand_gaussian_noise.randomize(None) + d[key] = self.rand_gaussian_noise.compute(img=d[key], mean=mean) return d @@ -238,7 +241,8 @@ def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, Nda if not self._do_transform: return d for key in self.key_iterator(d): - d[key] = self.rand_rician_noise(d[key]) + self.rand_rician_noise.randomize(None) + d[key] = self.rand_rician_noise.compute(d[key]) return d @@ -304,7 +308,7 @@ class RandShiftIntensityd(RandomizableTransform, MapTransform): Dictionary-based version :py:class:`monai.transforms.RandShiftIntensity`. 
""" - backend = ShiftIntensity.backend + backend = RandShiftIntensity.backend def __init__( self, @@ -343,36 +347,31 @@ def __init__( MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - if isinstance(offsets, (int, float)): - self.offsets = (min(-offsets, offsets), max(-offsets, offsets)) - else: - if len(offsets) != 2: - raise ValueError("offsets should be a number or pair of numbers.") - self.offsets = (min(offsets), max(offsets)) - self._offset = self.offsets[0] self.factor_key = ensure_tuple_rep(factor_key, len(self.keys)) self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys) if len(self.keys) != len(self.meta_keys): raise ValueError("meta_keys should have the same length as keys.") self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) - self.shifter = ShiftIntensity(self._offset) + self.shifter = RandShiftIntensity(offsets=offsets) - def randomize(self, data: Optional[Any] = None) -> None: - self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) - super().randomize(None) + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.shifter.set_random_state(seed, state) def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + super().randomize(None) if not self._do_transform: return d + + # all the keys share the same random shift factor + self.shifter.randomize(None) for key, factor_key, meta_key, meta_key_postfix in self.key_iterator( d, self.factor_key, self.meta_keys, self.meta_key_postfix ): meta_key = meta_key or f"{key}_{meta_key_postfix}" factor: Optional[float] = d[meta_key].get(factor_key) if meta_key in d else None - offset = self._offset if factor is None else self._offset * factor - d[key] = self.shifter(d[key], offset=offset) + d[key] = self.shifter.compute(d[key], factor=factor) return d @@ -418,7 +417,7 @@ class 
RandStdShiftIntensityd(RandomizableTransform, MapTransform): Dictionary-based version :py:class:`monai.transforms.RandStdShiftIntensity`. """ - backend = StdShiftIntensity.backend + backend = RandStdShiftIntensity.backend def __init__( self, @@ -444,30 +443,22 @@ def __init__( """ MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) + self.shifter = RandStdShiftIntensity(factors=factors, nonzero=nonzero, channel_wise=channel_wise, dtype=dtype) - if isinstance(factors, (int, float)): - self.factors = (min(-factors, factors), max(-factors, factors)) - elif len(factors) != 2: - raise ValueError("factors should be a number or pair of numbers.") - else: - self.factors = (min(factors), max(factors)) - self.factor = self.factors[0] - self.nonzero = nonzero - self.channel_wise = channel_wise - self.dtype = dtype - - def randomize(self, data: Optional[Any] = None) -> None: - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - super().randomize(None) + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.shifter.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + super().randomize(None) if not self._do_transform: return d - shifter = StdShiftIntensity(self.factor, self.nonzero, self.channel_wise, self.dtype) + + # all the keys share the same random shift factor + self.shifter.randomize(None) for key in self.key_iterator(d): - d[key] = shifter(d[key]) + d[key] = self.shifter.compute(d[key]) return d @@ -519,7 +510,7 @@ class RandScaleIntensityd(RandomizableTransform, MapTransform): Dictionary-based version :py:class:`monai.transforms.RandScaleIntensity`. 
""" - backend = ScaleIntensity.backend + backend = RandScaleIntensity.backend def __init__( self, @@ -543,28 +534,22 @@ def __init__( """ MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) + self.scaler = RandScaleIntensity(factors=factors, dtype=dtype) - if isinstance(factors, (int, float)): - self.factors = (min(-factors, factors), max(-factors, factors)) - elif len(factors) != 2: - raise ValueError("factors should be a number or pair of numbers.") - else: - self.factors = (min(factors), max(factors)) - self.factor = self.factors[0] - self.dtype = dtype - - def randomize(self, data: Optional[Any] = None) -> None: - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - super().randomize(None) + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.scaler.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + super().randomize(None) if not self._do_transform: return d - scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype) + + # all the keys share the same random scale factor + self.scaler.randomize(None) for key in self.key_iterator(d): - d[key] = scaler(d[key]) + d[key] = self.scaler.compute(d[key]) return d @@ -599,16 +584,19 @@ def __init__( self.rand_bias_field = RandBiasField(degree, coeff_range, dtype, prob) - def randomize(self, data: Optional[Any] = None) -> None: - super().randomize(None) + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.scaler.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - self.randomize() + super().randomize(None) if not self._do_transform: return d + for key in self.key_iterator(d): - d[key] = self.rand_bias_field(d[key]) + 
self.rand_bias_field.randomize(d[key]) + d[key] = self.rand_bias_field.compute(d[key]) return d @@ -765,7 +753,7 @@ class RandAdjustContrastd(RandomizableTransform, MapTransform): allow_missing_keys: don't raise exception if key is missing. """ - backend = AdjustContrast.backend + backend = RandAdjustContrast.backend def __init__( self, @@ -776,34 +764,22 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) + self.adjuster = RandAdjustContrast(gamma=gamma) - if isinstance(gamma, (int, float)): - if gamma <= 0.5: - raise ValueError( - "if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)" - ) - self.gamma = (0.5, gamma) - elif len(gamma) != 2: - raise ValueError("gamma should be a number or pair of numbers.") - else: - self.gamma = (min(gamma), max(gamma)) - - self.gamma_value: Optional[float] = None - - def randomize(self, data: Optional[Any] = None) -> None: - super().randomize(None) - self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) + def set_random_state(self, seed=None, state=None): + super().set_random_state(seed, state) + self.adjuster.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() - if self.gamma_value is None: - raise RuntimeError("gamma_value is not set.") + super().randomize(None) if not self._do_transform: return d - adjuster = AdjustContrast(self.gamma_value) + + # all the keys share the same random gamma value + self.adjuster.randomize(None) for key in self.key_iterator(d): - d[key] = adjuster(d[key]) + d[key] = self.adjuster.compute(d[key]) return d diff --git a/tests/test_rand_adjust_contrastd.py b/tests/test_rand_adjust_contrastd.py index 026828a9a3..87a3752b26 100644 --- a/tests/test_rand_adjust_contrastd.py +++ b/tests/test_rand_adjust_contrastd.py @@ -32,7 +32,7 @@ def test_correct_results(self, gamma): 
img_min = self.imt.min() img_range = self.imt.max() - img_min expected = ( - np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.gamma_value) * img_range + np.power(((self.imt - img_min) / float(img_range + epsilon)), adjuster.adjuster.gamma_value) * img_range + img_min ) assert_allclose(expected, result["img"], rtol=1e-05, type_test=False) diff --git a/tests/test_random_bias_field.py b/tests/test_rand_bias_field.py similarity index 87% rename from tests/test_random_bias_field.py rename to tests/test_rand_bias_field.py index 5aeeb79874..883ee03549 100644 --- a/tests/test_random_bias_field.py +++ b/tests/test_rand_bias_field.py @@ -16,10 +16,13 @@ from monai.transforms import RandBiasField -TEST_CASES_2D = [{}, (3, 32, 32)] -TEST_CASES_3D = [{}, (3, 32, 32, 32)] -TEST_CASES_2D_ZERO_RANGE = [{"coeff_range": (0.0, 0.0)}, (2, 3, 3)] -TEST_CASES_2D_ONES = [{"coeff_range": (1.0, 1.0)}, np.asarray([[[7.389056, 0.1353353], [7.389056, 22026.46]]])] +TEST_CASES_2D = [{"prob": 1.0}, (3, 32, 32)] +TEST_CASES_3D = [{"prob": 1.0}, (3, 32, 32, 32)] +TEST_CASES_2D_ZERO_RANGE = [{"prob": 1.0, "coeff_range": (0.0, 0.0)}, (2, 3, 3)] +TEST_CASES_2D_ONES = [ + {"prob": 1.0, "coeff_range": (1.0, 1.0)}, + np.asarray([[[7.389056, 0.1353353], [7.389056, 22026.46]]]), +] class TestRandBiasField(unittest.TestCase): diff --git a/tests/test_random_bias_fieldd.py b/tests/test_rand_bias_fieldd.py similarity index 91% rename from tests/test_random_bias_fieldd.py rename to tests/test_rand_bias_fieldd.py index aa2e206de9..b82d435f40 100644 --- a/tests/test_random_bias_fieldd.py +++ b/tests/test_rand_bias_fieldd.py @@ -16,11 +16,11 @@ from monai.transforms import RandBiasFieldd -TEST_CASES_2D = [{}, (3, 32, 32)] -TEST_CASES_3D = [{}, (3, 32, 32, 32)] -TEST_CASES_2D_ZERO_RANGE = [{"coeff_range": (0.0, 0.0)}, (3, 32, 32)] +TEST_CASES_2D = [{"prob": 1.0}, (3, 32, 32)] +TEST_CASES_3D = [{"prob": 1.0}, (3, 32, 32, 32)] +TEST_CASES_2D_ZERO_RANGE = [{"prob": 1.0, "coeff_range": (0.0, 
0.0)}, (3, 32, 32)] TEST_CASES_2D_ONES = [ - {"coeff_range": (1.0, 1.0)}, + {"prob": 1.0, "coeff_range": (1.0, 1.0)}, np.asarray([[[7.3890562e00, 1.3533528e-01], [7.3890562e00, 2.2026465e04]]]), ] diff --git a/tests/test_rand_gaussian_noised.py b/tests/test_rand_gaussian_noised.py index 4b0d2a311a..d7a50b1aec 100644 --- a/tests/test_rand_gaussian_noised.py +++ b/tests/test_rand_gaussian_noised.py @@ -34,8 +34,9 @@ def test_correct_results(self, _, im_type, keys, mean, std): im = im_type(self.imt) noised = gaussian_fn({k: im for k in keys}) np.random.seed(seed) - np.random.random() for k in keys: + # simulate the randomize() of transform + np.random.random() expected = self.imt + np.random.normal(mean, np.random.uniform(0, std), size=self.imt.shape) self.assertEqual(type(im), type(noised[k])) if isinstance(noised[k], torch.Tensor): diff --git a/tests/test_rand_scale_intensity.py b/tests/test_rand_scale_intensity.py index b863e2f874..c3d18330ea 100644 --- a/tests/test_rand_scale_intensity.py +++ b/tests/test_rand_scale_intensity.py @@ -24,6 +24,8 @@ def test_value(self): scaler.set_random_state(seed=0) result = scaler(p(self.imt)) np.random.seed(0) + # simulate the randomize() of transform + np.random.random() expected = p((self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32)) assert_allclose(result, p(expected), rtol=1e-7, atol=0) diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py index fdcbd7146a..86bcb78a80 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/test_rand_scale_intensityd.py @@ -25,6 +25,8 @@ def test_value(self): scaler.set_random_state(seed=0) result = scaler({key: p(self.imt)}) np.random.seed(0) + # simulate the randomize() of transform + np.random.random() expected = (self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32) assert_allclose(result[key], p(expected)) diff --git a/tests/test_rand_shift_intensity.py b/tests/test_rand_shift_intensity.py index 
4c4dd87dfe..7f5b278fd0 100644 --- a/tests/test_rand_shift_intensity.py +++ b/tests/test_rand_shift_intensity.py @@ -23,6 +23,8 @@ def test_value(self): shifter.set_random_state(seed=0) result = shifter(self.imt, factor=1.0) np.random.seed(0) + # simulate the randomize() of transform + np.random.random() expected = self.imt + np.random.uniform(low=-1.0, high=1.0) np.testing.assert_allclose(result, expected) diff --git a/tests/test_rand_shift_intensityd.py b/tests/test_rand_shift_intensityd.py index c5dfb66722..5950faac26 100644 --- a/tests/test_rand_shift_intensityd.py +++ b/tests/test_rand_shift_intensityd.py @@ -25,6 +25,8 @@ def test_value(self): shifter.set_random_state(seed=0) result = shifter({key: p(self.imt)}) np.random.seed(0) + # simulate the randomize() of transform + np.random.random() expected = self.imt + np.random.uniform(low=-1.0, high=1.0) assert_allclose(result[key], p(expected)) @@ -36,6 +38,8 @@ def test_factor(self): shifter.set_random_state(seed=0) result = shifter(stats(data)) np.random.seed(0) + # simulate the randomize() of transform + np.random.random() expected = self.imt + np.random.uniform(low=-1.0, high=1.0) * np.nanmax(self.imt) np.testing.assert_allclose(result[key], expected) diff --git a/tests/test_rand_std_shift_intensity.py b/tests/test_rand_std_shift_intensity.py index 0c6382555e..5b0db09063 100644 --- a/tests/test_rand_std_shift_intensity.py +++ b/tests/test_rand_std_shift_intensity.py @@ -22,6 +22,8 @@ class TestRandStdShiftIntensity(NumpyImageTestCase2D): def test_value(self): for p in TEST_NDARRAYS: np.random.seed(0) + # simulate the randomize() of transform + np.random.random() factor = np.random.uniform(low=-1.0, high=1.0) offset = factor * np.std(self.imt) expected = p(self.imt + offset) diff --git a/tests/test_rand_std_shift_intensityd.py b/tests/test_rand_std_shift_intensityd.py index 0ab017a42d..fbc71721d0 100644 --- a/tests/test_rand_std_shift_intensityd.py +++ b/tests/test_rand_std_shift_intensityd.py @@ -23,6 +23,8 
@@ def test_value(self): for p in TEST_NDARRAYS: key = "img" np.random.seed(0) + # simulate the randomize() of transform + np.random.random() factor = np.random.uniform(low=-1.0, high=1.0) expected = self.imt + factor * np.std(self.imt) shifter = RandStdShiftIntensityd(keys=[key], factors=1.0, prob=1.0) From b08925b8aeba14d91a78b63bc5c4573b5da281cc Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Sat, 9 Oct 2021 17:43:24 +0800 Subject: [PATCH 2/7] [DLMED] update test cases Signed-off-by: Nic Ma --- tests/test_grid_dataset.py | 4 ++-- tests/test_patch_dataset.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/test_grid_dataset.py b/tests/test_grid_dataset.py index 6e0aa4023e..0f4065a30b 100644 --- a/tests/test_grid_dataset.py +++ b/tests/test_grid_dataset.py @@ -56,7 +56,7 @@ def test_loading_array(self): np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2)) np.testing.assert_allclose( item[0], - np.array([[[[1.7413, 2.7413], [5.7413, 6.7413]]], [[[9.1419, 10.1419], [13.1419, 14.1419]]]]), + np.array([[[[2.0577, 3.0577], [6.0577, 7.0577]]], [[[10.5540, 11.5540], [14.5540, 15.5540]]]]), rtol=1e-5, ) np.testing.assert_allclose( @@ -69,7 +69,7 @@ def test_loading_array(self): np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2)) np.testing.assert_allclose( item[0], - np.array([[[[2.3944, 3.3944], [6.3944, 7.3944]]], [[[10.6551, 11.6551], [14.6551, 15.6551]]]]), + np.array([[[[1.6533, 2.6533], [5.6533, 6.6533]]], [[[9.8524, 10.8524], [13.8524, 14.8524]]]]), rtol=1e-3, ) np.testing.assert_allclose( diff --git a/tests/test_patch_dataset.py b/tests/test_patch_dataset.py index 4f6e9a25fd..7bb39bc969 100644 --- a/tests/test_patch_dataset.py +++ b/tests/test_patch_dataset.py @@ -59,7 +59,7 @@ def test_loading_array(self): np.testing.assert_allclose( item[0], np.array( - [[[1.779992, 2.779992, 3.779992], [5.779992, 6.779992, 7.779992], [9.779992, 10.779992, 11.779992]]] + [[[1.338681, 2.338681, 3.338681], [5.338681, 6.338681, 
7.338681], [9.338681, 10.338681, 11.338681]]] ), rtol=1e-5, ) @@ -71,11 +71,11 @@ def test_loading_array(self): np.array( [ [ - [5.025618, 6.025618, 7.025618], - [9.025618, 10.025618, 11.025618], - [13.025618, 14.025618, 15.025618], - ] - ] + [4.957847, 5.957847, 6.957847], + [8.957847, 9.957847, 10.957847], + [12.957847, 13.957847, 14.957847], + ], + ], ), rtol=1e-5, ) From 8e6bf54fc156d98146e73bbfc10c5f178eecf531 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Mon, 11 Oct 2021 21:34:47 +0800 Subject: [PATCH 3/7] [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/transforms/intensity/array.py | 33 ++++++++++++++---------- monai/transforms/intensity/dictionary.py | 12 ++++----- tests/test_rand_gaussian_noised.py | 7 ++--- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 9792d2fc86..0661b66fc9 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -86,6 +86,7 @@ class RandGaussianNoise(RandomizableTransform): prob: Probability to add Gaussian noise. mean: Mean or “centre” of the distribution. std: Standard deviation (spread) of distribution. 
+ """ backend = [TransformBackends.TORCH, TransformBackends.NUMPY] @@ -94,32 +95,36 @@ def __init__(self, prob: float = 0.1, mean: float = 0.0, std: float = 0.1) -> No RandomizableTransform.__init__(self, prob) self.mean = mean self.std = std + self.noise: Optional[np.ndarray] = None - def randomize(self, data: Any) -> None: + def randomize(self, img: NdarrayOrTensor, mean: Optional[float] = None) -> None: super().randomize(None) - self._rand_std = self.R.uniform(0, self.std) - - def compute(self, img: NdarrayOrTensor, mean: float = 0.0) -> NdarrayOrTensor: - noise = self.R.normal(mean, self._rand_std, size=img.shape) - noise_, *_ = convert_to_dst_type(noise, img) - return img + noise_ + if self._do_transform: + rand_std = self.R.uniform(0, self.std) + self.noise = self.R.normal(self.mean if mean is None else mean, rand_std, size=img.shape) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, mean: Optional[float] = None, randomize: bool = True) -> NdarrayOrTensor: """ Apply the transform to `img`. """ - self.randomize(None) - if not self._do_transform: - return img - return self.compute(img=img, mean=self.mean) + if randomize: + self.randomize(img=img, mean=self.mean if mean is None else mean) + + if self._do_transform: + if self.noise is None: + raise RuntimeError("please call the `randomize()` function first.") + noise, *_ = convert_to_dst_type(self.noise, img) + img = img + noise + + return img class RandRicianNoise(RandomizableTransform): """ Add Rician noise to image. Rician noise in MRI is the result of performing a magnitude operation on complex - data with Gaussian noise of the same variance in both channels, as described in `Noise in Magnitude Magnetic Resonance Images - `_. This transform is adapted from + data with Gaussian noise of the same variance in both channels, as described in `Noise in Magnitude + Magnetic Resonance Images `_. This transform is adapted from `DIPY`_. 
See also: `The rician distribution of noisy mri data `_. diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index e765608395..ff30093374 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -163,14 +163,13 @@ def __init__( self, keys: KeysCollection, prob: float = 0.1, - mean: Union[Sequence[float], float] = 0.0, + mean: float = 0.0, std: float = 0.1, allow_missing_keys: bool = False, ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.mean = ensure_tuple_rep(mean, len(self.keys)) - self.rand_gaussian_noise = RandGaussianNoise(std=std) + self.rand_gaussian_noise = RandGaussianNoise(mean=mean, std=std, prob=1.0) def set_random_state(self, seed=None, state=None): super().set_random_state(seed, state) @@ -182,9 +181,10 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N if not self._do_transform: return d - for key, mean in self.key_iterator(d, self.mean): - self.rand_gaussian_noise.randomize(None) - d[key] = self.rand_gaussian_noise.compute(img=d[key], mean=mean) + # all the keys share the same random noise + self.rand_gaussian_noise.randomize(d[self.keys[0]]) + for key in self.key_iterator(d): + d[key] = self.rand_gaussian_noise(img=d[key], randomize=False) return d diff --git a/tests/test_rand_gaussian_noised.py b/tests/test_rand_gaussian_noised.py index d7a50b1aec..d9fd5d023d 100644 --- a/tests/test_rand_gaussian_noised.py +++ b/tests/test_rand_gaussian_noised.py @@ -34,10 +34,11 @@ def test_correct_results(self, _, im_type, keys, mean, std): im = im_type(self.imt) noised = gaussian_fn({k: im for k in keys}) np.random.seed(seed) + # simulate the randomize() of transform + np.random.random() + noise = np.random.normal(mean, np.random.uniform(0, std), size=self.imt.shape) for k in keys: - # simulate the randomize() of transform - np.random.random() - expected = self.imt + 
np.random.normal(mean, np.random.uniform(0, std), size=self.imt.shape) + expected = self.imt + noise self.assertEqual(type(im), type(noised[k])) if isinstance(noised[k], torch.Tensor): noised[k] = noised[k].cpu() From 8716b580d6cd3f76f420904371fa4fc80a8412e2 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 12 Oct 2021 01:08:57 +0800 Subject: [PATCH 4/7] [DLMED] update all the intensity transforms Signed-off-by: Nic Ma --- monai/transforms/intensity/array.py | 363 +++++++++++---------- monai/transforms/intensity/dictionary.py | 382 ++++++++++------------- tests/test_rand_gibbs_noised.py | 2 +- tests/test_rand_histogram_shift.py | 4 +- tests/test_rand_k_space_spike_noised.py | 66 +--- tests/test_rand_rician_noised.py | 1 + tests/test_rand_scale_intensityd.py | 2 +- 7 files changed, 369 insertions(+), 451 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 0661b66fc9..fea3b3b5a5 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -176,30 +176,28 @@ def _add_noise(self, img: NdarrayTensor, mean: float, std: float): return np.sqrt((img + self._noise1) ** 2 + self._noise2 ** 2) - def compute(self, img: NdarrayTensor) -> NdarrayTensor: - if self.channel_wise: - _mean = ensure_tuple_rep(self.mean, len(img)) - _std = ensure_tuple_rep(self.std, len(img)) - for i, d in enumerate(img): - img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i]) - else: - if not isinstance(self.mean, (int, float)): - raise RuntimeError("If channel_wise is False, mean must be a float or int number.") - if not isinstance(self.std, (int, float)): - raise RuntimeError("If channel_wise is False, std must be a float or int number.") - std = self.std * img.std() if self.relative else self.std - if not isinstance(std, (int, float)): - raise RuntimeError("std must be a float or int number.") - img = self._add_noise(img, mean=self.mean, std=std) - return img - - def 
__call__(self, img: NdarrayTensor) -> NdarrayTensor: + def __call__(self, img: NdarrayTensor, randomize: bool = True) -> NdarrayTensor: """ Apply the transform to `img`. """ - super().randomize(None) + if randomize: + super().randomize(None) + if self._do_transform: - img = self.compute(img=img) + if self.channel_wise: + _mean = ensure_tuple_rep(self.mean, len(img)) + _std = ensure_tuple_rep(self.std, len(img)) + for i, d in enumerate(img): + img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i]) + else: + if not isinstance(self.mean, (int, float)): + raise RuntimeError("If channel_wise is False, mean must be a float or int number.") + if not isinstance(self.std, (int, float)): + raise RuntimeError("If channel_wise is False, std must be a float or int number.") + std = self.std * img.std() if self.relative else self.std + if not isinstance(std, (int, float)): + raise RuntimeError("std must be a float or int number.") + img = self._add_noise(img, mean=self.mean, std=std) return img @@ -255,12 +253,15 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) - - def compute(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> NdarrayOrTensor: - return self._shfiter(img, self._offset if factor is None else self._offset * factor) + if self._do_transform: + self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) - def __call__(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> NdarrayOrTensor: + def __call__( + self, + img: NdarrayOrTensor, + factor: Optional[float] = None, + randomize: bool = True, + ) -> NdarrayOrTensor: """ Apply the transform to `img`. 
@@ -270,9 +271,11 @@ def __call__(self, img: NdarrayOrTensor, factor: Optional[float] = None) -> Ndar can be some image specific value at runtime, like: max(img), etc. """ - self.randomize() + if randomize: + self.randomize() + if self._do_transform: - img = self.compute(img=img, factor=factor) + img = self._shfiter(img, self._offset if factor is None else self._offset * factor) return img @@ -371,21 +374,21 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - - def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - shifter = StdShiftIntensity( - factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype - ) - return shifter(img) + if self._do_transform: + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ Apply the transform to `img`. 
""" - self.randomize() + if randomize: + self.randomize() + if self._do_transform: - img = self.compute(img=img) + shifter = StdShiftIntensity( + factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype + ) + img = shifter(img=img) return img @@ -477,19 +480,18 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - - def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype) - return scaler(img) + if self._do_transform: + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ Apply the transform to `img`. """ - self.randomize() + if randomize: + self.randomize() + if self._do_transform: - img = self.compute(img=img) + img = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img) return img @@ -555,28 +557,28 @@ def _generate_random_field(self, spatial_shape: Sequence[int], degree: int, coef def randomize(self, data: np.ndarray) -> None: super().randomize(None) - n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)])) - self._coeff = self.R.uniform(*self.coeff_range, n_coeff).tolist() - - def compute(self, img: np.ndarray): - img, *_ = convert_data_type(img, np.ndarray) # type: ignore - num_channels, *spatial_shape = img.shape - _bias_fields = np.stack( - [ - self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff) - for _ in range(num_channels) - ], - axis=0, - ) - return (img * np.exp(_bias_fields)).astype(self.dtype) + if self._do_transform: + n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)])) + self._coeff = 
self.R.uniform(*self.coeff_range, n_coeff).tolist() - def __call__(self, img: np.ndarray): + def __call__(self, img: np.ndarray, randomize: bool = True): """ Apply the transform to `img`. """ - self.randomize(data=img) + if randomize: + self.randomize(data=img) + if self._do_transform: - img = self.compute(img=img) + img, *_ = convert_data_type(img, np.ndarray) # type: ignore + num_channels, *spatial_shape = img.shape + _bias_fields = np.stack( + [ + self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff) + for _ in range(num_channels) + ], + axis=0, + ) + img = (img * np.exp(_bias_fields)).astype(self.dtype) return img @@ -804,24 +806,24 @@ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0. else: self.gamma = (min(gamma), max(gamma)) - self.gamma_value: float + self.gamma_value: Optional[float] = None def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) - - def compute(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - return AdjustContrast(self.gamma_value)(img) + if self._do_transform: + self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ Apply the transform to `img`. 
""" - self.randomize() + if randomize: + self.randomize() + if self.gamma_value is None: - raise ValueError("gamma_value is not set.") + raise RuntimeError("gamma_value is not set, please call `randomize` function first.") if self._do_transform: - img = self.compute(img=img) + img = AdjustContrast(self.gamma_value)(img) return img @@ -1088,6 +1090,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: gaussian_filter = GaussianFilter(img_t.ndim - 1, sigma, approx=self.approx) out_t: torch.Tensor = gaussian_filter(img_t.unsqueeze(0)).squeeze(0) out, *_ = convert_data_type(out_t, type(img), device=img.device if isinstance(img, torch.Tensor) else None) + return out @@ -1127,17 +1130,20 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) - self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) - self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) + if self._do_transform: + self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) + self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) + self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - self.randomize() - if not self._do_transform: - img, *_ = convert_data_type(img, dtype=torch.float) - return img - sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1) - return GaussianSmooth(sigma=sigma, approx=self.approx)(img) + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + if randomize: + self.randomize() + + if self._do_transform: + sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1) + img = GaussianSmooth(sigma=sigma, approx=self.approx)(img) + + return img class GaussianSharpen(Transform): @@ -1241,29 +1247,40 @@ def __init__( self.sigma2_z = sigma2_z self.alpha = alpha self.approx = approx + 
self.x1: Optional[float] = None + self.y1: Optional[float] = None + self.z1: Optional[float] = None + self.x2: Optional[float] = None + self.y2: Optional[float] = None + self.z2: Optional[float] = None + self.a: Optional[float] = None def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) - self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) - self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) - sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x - sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y - sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z - self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1]) - self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1]) - self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) - self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) + if self._do_transform: + self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) + self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) + self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) + sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x + sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y + sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z + self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1]) + self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1]) + self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) + self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) + + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + if randomize: + self.randomize() 
- def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - self.randomize() - # if not doing, just need to convert to tensor - if not self._do_transform: - img, *_ = convert_data_type(img, dtype=torch.float32) - return img - sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1) - sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1) - return GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img) + if self._do_transform: + if self.x2 is None or self.y2 is None or self.z2 is None or self.a is None: + raise RuntimeError("please call the `randomize()` function first.") + sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1) + sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1) + img = GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img) + + return img class RandHistogramShift(RandomizableTransform): @@ -1292,29 +1309,38 @@ def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: f if min(num_control_points) <= 2: raise ValueError("num_control_points should be greater than or equal to 3") self.num_control_points = (min(num_control_points), max(num_control_points)) + self.reference_control_points: np.ndarray + self.floating_control_points: np.ndarray def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) - self.reference_control_points = np.linspace(0, 1, num_control_point) - self.floating_control_points = np.copy(self.reference_control_points) - for i in range(1, num_control_point - 1): - self.floating_control_points[i] = self.R.uniform( - self.floating_control_points[i - 1], self.floating_control_points[i + 1] + if self._do_transform: + num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) + self.reference_control_points = 
np.linspace(0, 1, num_control_point) + self.floating_control_points = np.copy(self.reference_control_points) + for i in range(1, num_control_point - 1): + self.floating_control_points[i] = self.R.uniform( + self.floating_control_points[i - 1], self.floating_control_points[i + 1] + ) + + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + if randomize: + self.randomize() + + if self._do_transform: + if self.reference_control_points is None or self.floating_control_points is None: + raise RuntimeError("please call the `randomize()` function first.") + img_np: np.ndarray + img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore + img_min, img_max = img_np.min(), img_np.max() + reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min + floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min + img_np = np.asarray( + np.interp(img_np, reference_control_points_scaled, floating_control_points_scaled), dtype=img_np.dtype ) + img, *_ = convert_to_dst_type(img_np, dst=img) - def __call__(self, img: NdarrayOrTensor) -> np.ndarray: - img_np: np.ndarray - img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore - self.randomize() - if not self._do_transform: - return img_np - img_min, img_max = img_np.min(), img_np.max() - reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min - floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min - return np.asarray( - np.interp(img_np, reference_control_points_scaled, floating_control_points_scaled), dtype=img_np.dtype - ) + return img class GibbsNoise(Transform, Fourier): @@ -1340,10 +1366,10 @@ class GibbsNoise(Transform, Fourier): backend = [TransformBackends.TORCH, TransformBackends.NUMPY] @deprecated_arg(name="as_tensor_output", since="0.6") - def __init__(self, alpha: float = 0.5, as_tensor_output: bool = True) -> None: + def 
__init__(self, alpha: float = 0.1, as_tensor_output: bool = True) -> None: if alpha > 1 or alpha < 0: - raise ValueError("alpha must take values in the interval [0,1].") + raise ValueError("alpha must take values in the interval [0, 1].") self.alpha = alpha def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: @@ -1417,11 +1443,10 @@ class RandGibbsNoise(RandomizableTransform): @deprecated_arg(name="as_tensor_output", since="0.6") def __init__(self, prob: float = 0.1, alpha: Sequence[float] = (0.0, 1.0), as_tensor_output: bool = True) -> None: - if len(alpha) != 2: raise ValueError("alpha length must be 2.") if alpha[1] > 1 or alpha[0] < 0: - raise ValueError("alpha must take values in the interval [0,1]") + raise ValueError("alpha must take values in the interval [0, 1]") if alpha[0] > alpha[1]: raise ValueError("When alpha = [a,b] we need a < b.") @@ -1430,24 +1455,24 @@ def __init__(self, prob: float = 0.1, alpha: Sequence[float] = (0.0, 1.0), as_te RandomizableTransform.__init__(self, prob=prob) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: - - # randomize application and possibly alpha - self._randomize(None) - - if self._do_transform: - # apply transform - transform = GibbsNoise(self.sampled_alpha) - img = transform(img) - return img - - def _randomize(self, _: Any) -> None: + def randomize(self, data: Any) -> None: """ (1) Set random variable to apply the transform. (2) Get alpha from uniform distribution. 
""" super().randomize(None) - self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1]) + if self._do_transform: + self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1]) + + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + if randomize: + # randomize application and possibly alpha + self.randomize(None) + + if self._do_transform: + img = GibbsNoise(self.sampled_alpha)(img) + + return img class KSpaceSpikeNoise(Transform, Fourier): @@ -1603,12 +1628,10 @@ class RandKSpaceSpikeNoise(RandomizableTransform, Fourier): Args: prob: probability of applying the transform, either on all channels at once, or channel-wise if ``channel_wise = True``. - intensity_range: pass a tuple - (a, b) to sample the log-intensity from the interval (a, b) + intensity_range: pass a tuple (a, b) to sample the log-intensity from the interval (a, b) uniformly for all channels. Or pass sequence of intevals ((a0, b0), (a1, b1), ...) to sample for each respective channel. - In the second case, the number of 2-tuples must match the number of - channels. + In the second case, the number of 2-tuples must match the number of channels. Default ranges is `(0.95x, 1.10x)` where `x` is the mean log-intensity for each channel. channel_wise: treat each channel independently. True by @@ -1628,7 +1651,7 @@ def __init__( self, prob: float = 0.1, intensity_range: Optional[Sequence[Union[Sequence[float], float]]] = None, - channel_wise=True, + channel_wise: bool = True, as_tensor_output: bool = True, ): @@ -1642,13 +1665,14 @@ def __init__( super().__init__(prob) - def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ Apply transform to `img`. Assumes data is in channel-first form. 
Args: img: image with dimensions (C, H, W) or (C, H, W, D) """ + if ( self.intensity_range is not None and isinstance(self.intensity_range[0], Sequence) @@ -1661,18 +1685,16 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: self.sampled_k_intensity = [] self.sampled_locs = [] - intensity_range = self._make_sequence(img) - self._randomize(img, intensity_range) + if randomize: + intensity_range = self._make_sequence(img) + self.randomize(img, intensity_range) - # build/appy transform only if there are spike locations - if self.sampled_locs: - transform = KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity) - out: NdarrayOrTensor = transform(img) - return out + if self._do_transform: + img = KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity)(img) return img - def _randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[float]]) -> None: + def randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[float]]) -> None: # type: ignore """ Helper method to sample both the location and intensity of the spikes. When not working channel wise (channel_wise=False) it use the random @@ -1682,17 +1704,15 @@ def _randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[fl When working channel wise, the method randomly samples a location and intensity for each channel depending on ``self._do_transform``. 
""" - # randomizing per channel - if self.channel_wise: - for i, chan in enumerate(img): - super().randomize(None) - if self._do_transform: + super().randomize(None) + if self._do_transform: + if self.channel_wise: + # randomizing per channel + for i, chan in enumerate(img): self.sampled_locs.append((i,) + tuple(self.R.randint(0, k) for k in chan.shape)) self.sampled_k_intensity.append(self.R.uniform(intensity_range[i][0], intensity_range[i][1])) - # working with all channels together - else: - super().randomize(None) - if self._do_transform: + else: + # working with all channels together spatial = tuple(self.R.randint(0, k) for k in img.shape[1:]) self.sampled_locs = [(i,) + spatial for i in range(img.shape[0])] if isinstance(intensity_range[0], Sequence): @@ -1770,15 +1790,16 @@ def __init__( def randomize(self, img_size: Sequence[int]) -> None: super().randomize(None) - size = fall_back_tuple(self.spatial_size, img_size) - self.hole_coords = [] # clear previously computed coords - num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1) - for _ in range(num_holes): - if self.max_spatial_size is not None: - max_size = fall_back_tuple(self.max_spatial_size, img_size) - size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size))) - valid_size = get_valid_patch_size(img_size, size) - self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R)) + if self._do_transform: + size = fall_back_tuple(self.spatial_size, img_size) + self.hole_coords = [] # clear previously computed coords + num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1) + for _ in range(num_holes): + if self.max_spatial_size is not None: + max_size = fall_back_tuple(self.max_spatial_size, img_size) + size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size))) + valid_size = get_valid_patch_size(img_size, size) + 
self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R)) @abstractmethod def _transform_holes(self, img: np.ndarray) -> np.ndarray: @@ -1788,10 +1809,12 @@ def _transform_holes(self, img: np.ndarray) -> np.ndarray: """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - def __call__(self, img: np.ndarray): - img, *_ = convert_data_type(img, np.ndarray) # type: ignore - self.randomize(img.shape[1:]) + def __call__(self, img: np.ndarray, randomize: bool = True): + if randomize: + self.randomize(img.shape[1:]) + if self._do_transform: + img, *_ = convert_data_type(img, np.ndarray) # type: ignore img = self._transform_holes(img=img) return img diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index ff30093374..26b8ac6447 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -15,11 +15,9 @@ Class names are ended with 'd' to denote dictionary-based transforms. 
""" -from collections.abc import Iterable -from typing import Any, Callable, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union +from typing import Callable, Dict, Hashable, Mapping, Optional, Sequence, Tuple, Union import numpy as np -import torch from monai.config import DtypeLike, KeysCollection, NdarrayTensor from monai.config.type_definitions import NdarrayOrTensor @@ -37,6 +35,10 @@ RandCoarseDropout, RandCoarseShuffle, RandGaussianNoise, + RandGaussianSharpen, + RandGaussianSmooth, + RandGibbsNoise, + RandHistogramShift, RandKSpaceSpikeNoise, RandRicianNoise, RandScaleIntensity, @@ -49,12 +51,10 @@ StdShiftIntensity, ThresholdIntensity, ) -from monai.transforms.transform import MapTransform, Randomizable, RandomizableTransform +from monai.transforms.transform import MapTransform, RandomizableTransform from monai.transforms.utils import is_positive -from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size +from monai.utils import ensure_tuple, ensure_tuple_rep from monai.utils.deprecated import deprecated_arg -from monai.utils.enums import TransformBackends -from monai.utils.type_conversion import convert_data_type __all__ = [ "RandGaussianNoised", @@ -171,7 +171,7 @@ def __init__( RandomizableTransform.__init__(self, prob) self.rand_gaussian_noise = RandGaussianNoise(mean=mean, std=std, prob=1.0) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.rand_gaussian_noise.set_random_state(seed, state) @@ -219,7 +219,7 @@ def __init__( self, keys: KeysCollection, global_prob: float = 0.1, - prob: float = 1.0, + prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: Union[Sequence[float], float] = 1.0, channel_wise: bool = False, @@ -229,9 +229,16 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, global_prob) - 
self.rand_rician_noise = RandRicianNoise(prob, mean, std, channel_wise, relative, sample_std) + self.rand_rician_noise = RandRicianNoise( + prob=1.0, + mean=mean, + std=std, + channel_wise=channel_wise, + relative=relative, + sample_std=sample_std, + ) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.rand_rician_noise.set_random_state(seed, state) @@ -240,9 +247,9 @@ def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, Nda super().randomize(None) if not self._do_transform: return d + for key in self.key_iterator(d): - self.rand_rician_noise.randomize(None) - d[key] = self.rand_rician_noise.compute(d[key]) + d[key] = self.rand_rician_noise(d[key], randomize=True) return d @@ -352,9 +359,9 @@ def __init__( if len(self.keys) != len(self.meta_keys): raise ValueError("meta_keys should have the same length as keys.") self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) - self.shifter = RandShiftIntensity(offsets=offsets) + self.shifter = RandShiftIntensity(offsets=offsets, prob=1.0) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.shifter.set_random_state(seed, state) @@ -371,7 +378,7 @@ def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]: ): meta_key = meta_key or f"{key}_{meta_key_postfix}" factor: Optional[float] = d[meta_key].get(factor_key) if meta_key in d else None - d[key] = self.shifter.compute(d[key], factor=factor) + d[key] = self.shifter(d[key], factor=factor, randomize=False) return d @@ -443,9 +450,15 @@ def __init__( """ MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.shifter = RandStdShiftIntensity(factors=factors, nonzero=nonzero, 
channel_wise=channel_wise, dtype=dtype) + self.shifter = RandStdShiftIntensity( + factors=factors, + nonzero=nonzero, + channel_wise=channel_wise, + dtype=dtype, + prob=1.0, + ) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.shifter.set_random_state(seed, state) @@ -458,7 +471,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N # all the keys share the same random shift factor self.shifter.randomize(None) for key in self.key_iterator(d): - d[key] = self.shifter.compute(d[key]) + d[key] = self.shifter(d[key], randomize=False) return d @@ -534,9 +547,9 @@ def __init__( """ MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.scaler = RandScaleIntensity(factors=factors, dtype=dtype) + self.scaler = RandScaleIntensity(factors=factors, dtype=dtype, prob=1.0) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.scaler.set_random_state(seed, state) @@ -549,7 +562,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N # all the keys share the same random scale factor self.scaler.randomize(None) for key in self.key_iterator(d): - d[key] = self.scaler.compute(d[key]) + d[key] = self.scaler(d[key], randomize=False) return d @@ -564,7 +577,7 @@ def __init__( degree: int = 3, coeff_range: Tuple[float, float] = (0.0, 0.1), dtype: DtypeLike = np.float32, - prob: float = 1.0, + prob: float = 0.1, allow_missing_keys: bool = False, ) -> None: """ @@ -582,11 +595,11 @@ def __init__( MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.rand_bias_field = RandBiasField(degree, coeff_range, dtype, prob) + self.rand_bias_field = 
RandBiasField(degree=degree, coeff_range=coeff_range, dtype=dtype, prob=1.0) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) - self.scaler.set_random_state(seed, state) + self.rand_bias_field.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) @@ -594,9 +607,10 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda if not self._do_transform: return d + # all the keys share the same random bias factor + self.rand_bias_field.randomize(d[self.keys[0]]) for key in self.key_iterator(d): - self.rand_bias_field.randomize(d[key]) - d[key] = self.rand_bias_field.compute(d[key]) + d[key] = self.rand_bias_field(d[key], randomize=False) return d @@ -764,9 +778,9 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.adjuster = RandAdjustContrast(gamma=gamma) + self.adjuster = RandAdjustContrast(gamma=gamma, prob=1.0) - def set_random_state(self, seed=None, state=None): + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) self.adjuster.set_random_state(seed, state) @@ -779,7 +793,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N # all the keys share the same random gamma value self.adjuster.randomize(None) for key in self.key_iterator(d): - d[key] = self.adjuster.compute(d[key]) + d[key] = self.adjuster(d[key], randomize=False) return d @@ -914,7 +928,7 @@ class RandGaussianSmoothd(RandomizableTransform, MapTransform): """ - backend = GaussianSmooth.backend + backend = RandGaussianSmooth.backend def __init__( self, @@ -928,26 +942,28 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) 
RandomizableTransform.__init__(self, prob) - self.sigma_x, self.sigma_y, self.sigma_z = sigma_x, sigma_y, sigma_z - self.approx = approx - - self.x, self.y, self.z = self.sigma_x[0], self.sigma_y[0], self.sigma_z[0] + self.rand_smooth = RandGaussianSmooth( + sigma_x=sigma_x, + sigma_y=sigma_y, + sigma_z=sigma_z, + approx=approx, + prob=1.0, + ) - def randomize(self, data: Optional[Any] = None) -> None: - super().randomize(None) - self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) - self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) - self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + super().set_random_state(seed, state) + self.rand_smooth.set_random_state(seed, state) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + self.randomize(None) + if not self._do_transform: + return d + + # all the keys share the same random sigma + self.rand_smooth.randomize(None) for key in self.key_iterator(d): - if self._do_transform: - sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=d[key].ndim - 1) - d[key] = GaussianSmooth(sigma=sigma, approx=self.approx)(d[key]) - else: - d[key], *_ = convert_data_type(d[key], torch.Tensor, dtype=torch.float) + d[key] = self.rand_smooth(d[key], randomize=False) return d @@ -1016,7 +1032,7 @@ class RandGaussianSharpend(RandomizableTransform, MapTransform): """ - backend = GaussianSharpen.backend + backend = RandGaussianSharpen.backend def __init__( self, @@ -1034,39 +1050,32 @@ def __init__( ): MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - self.sigma1_x = sigma1_x - self.sigma1_y = sigma1_y - self.sigma1_z = sigma1_z - self.sigma2_x = sigma2_x - self.sigma2_y = sigma2_y - self.sigma2_z = sigma2_z - self.alpha = alpha - self.approx = approx - - def 
randomize(self, data: Optional[Any] = None) -> None: - super().randomize(None) - self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) - self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) - self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) - sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x - sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y - sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z - self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1]) - self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1]) - self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) - self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) + self.rand_sharpen = RandGaussianSharpen( + sigma1_x=sigma1_x, + sigma1_y=sigma1_y, + sigma1_z=sigma1_z, + sigma2_x=sigma2_x, + sigma2_y=sigma2_y, + sigma2_z=sigma2_z, + alpha=alpha, + approx=approx, + prob=1.0, + ) + + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + super().set_random_state(seed, state) + self.rand_sharpen.set_random_state(seed, state) def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + self.randomize(None) + if not self._do_transform: + return d + + # all the keys share the same random sigma1, sigma2, etc. 
+ self.rand_sharpen.randomize(None) for key in self.key_iterator(d): - if self._do_transform: - sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=d[key].ndim - 1) - sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=d[key].ndim - 1) - d[key] = GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(d[key]) - else: - # if not doing the transform, convert to torch - d[key], *_ = convert_data_type(d[key], torch.Tensor, dtype=torch.float32) + d[key] = self.rand_sharpen(d[key], randomize=False) return d @@ -1085,7 +1094,7 @@ class RandHistogramShiftd(RandomizableTransform, MapTransform): allow_missing_keys: don't raise exception if key is missing. """ - backend = [TransformBackends.NUMPY] + backend = RandHistogramShift.backend def __init__( self, @@ -1096,40 +1105,22 @@ def __init__( ) -> None: MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob) - if isinstance(num_control_points, int): - if num_control_points <= 2: - raise ValueError("num_control_points should be greater than or equal to 3") - self.num_control_points = (num_control_points, num_control_points) - else: - if len(num_control_points) != 2: - raise ValueError("num_control points should be a number or a pair of numbers") - if min(num_control_points) <= 2: - raise ValueError("num_control_points should be greater than or equal to 3") - self.num_control_points = (min(num_control_points), max(num_control_points)) - - def randomize(self, data: Optional[Any] = None) -> None: - super().randomize(None) - num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) - self.reference_control_points = np.linspace(0, 1, num_control_point) - self.floating_control_points = np.copy(self.reference_control_points) - for i in range(1, num_control_point - 1): - self.floating_control_points[i] = self.R.uniform( - self.floating_control_points[i - 1], self.floating_control_points[i + 1] - ) + 
self.shifter = RandHistogramShift(num_control_points=num_control_points, prob=1.0) - def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + super().set_random_state(seed, state) + self.shifter.set_random_state(seed, state) + + def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self.randomize() + self.randomize(None) + if not self._do_transform: + return d + + # all the keys share the same random shift params + self.shifter.randomize(None) for key in self.key_iterator(d): - d[key] = convert_data_type(d[key], np.ndarray)[0] - if self._do_transform: - img_min, img_max = d[key].min(), d[key].max() - reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min - floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min - dtype = d[key].dtype - d[key] = np.interp(d[key], reference_control_points_scaled, floating_control_points_scaled).astype( - dtype - ) + d[key] = self.shifter(d[key], randomize=False) return d @@ -1158,7 +1149,7 @@ class RandGibbsNoised(RandomizableTransform, MapTransform): allow_missing_keys: do not raise exception if key is missing. 
""" - backend = GibbsNoise.backend + backend = RandGibbsNoise.backend @deprecated_arg(name="as_tensor_output", since="0.6") def __init__( @@ -1172,29 +1163,24 @@ def __init__( MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob=prob) - self.alpha = alpha - self.sampled_alpha = -1.0 # stores last alpha sampled by randomize() + self.rand_gibbs_noise = RandGibbsNoise(alpha=alpha, prob=1.0) - def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + super().set_random_state(seed, state) + self.rand_gibbs_noise.set_random_state(seed, state) + def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - self._randomize(None) + self.randomize(None) + if not self._do_transform: + return d - for i, key in enumerate(self.key_iterator(d)): - if self._do_transform: - if i == 0: - transform = GibbsNoise(self.sampled_alpha) - d[key] = transform(d[key]) + # all the keys share the same random noise params + self.rand_gibbs_noise.randomize(None) + for key in self.key_iterator(d): + d[key] = self.rand_gibbs_noise(d[key], randomize=False) return d - def _randomize(self, _: Any) -> None: - """ - (1) Set random variable to apply the transform. - (2) Get alpha from uniform distribution. - """ - super().randomize(None) - self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1]) - class GibbsNoised(MapTransform): """ @@ -1319,101 +1305,63 @@ class RandKSpaceSpikeNoised(RandomizableTransform, MapTransform): Args: keys: "image", "label", or ["image", "label"] depending on which data you need to transform. - global_prob: probability of applying transform to the dictionary. prob: probability to add spike artifact to each item in the dictionary provided it is realized that the noise will be applied to the dictionary. 
- intensity_ranges: Dictionary with intensity - ranges to sample for each key. Given a dictionary value of `(a, b)` the - transform will sample the log-intensity from the interval `(a, b)` uniformly for all - channels of the respective key. If a sequence of intevals `((a0, b0), (a1, b1), ...)` - is given, then the transform will sample from each interval for each - respective channel. In the second case, the number of 2-tuples must - match the number of channels. Default ranges is `(0.95x, 1.10x)` - where `x` is the mean log-intensity for each channel. - channel_wise: treat each channel independently. True by - default. - common_sampling: If ``True`` same values for location and log-intensity - will be sampled for the image and label. - common_seed: Seed to be used in case ``common_sampling = True``. + intensity_range: pass a tuple (a, b) to sample the log-intensity from the interval (a, b) + uniformly for all channels. Or pass sequence of intervals + ((a0, b0), (a1, b1), ...) to sample for each respective channel. + In the second case, the number of 2-tuples must match the number of channels. + Default range is `(0.95x, 1.10x)` where `x` is the mean + log-intensity for each channel. + channel_wise: treat each channel independently. True by default. allow_missing_keys: do not raise exception if key is missing. Example: To apply `k`-space spikes randomly on the image only, with probability 0.5, and log-intensity sampled from the interval [13, 15] for each channel independently, one uses - ``RandKSpaceSpikeNoised("image", prob=0.5, intensity_ranges={"image":(13,15)}, channel_wise=True)``. + ``RandKSpaceSpikeNoised("image", prob=0.5, intensity_range=(13, 15), channel_wise=True)``. 
""" - backend = KSpaceSpikeNoise.backend + backend = RandKSpaceSpikeNoise.backend @deprecated_arg(name="as_tensor_output", since="0.6") + @deprecated_arg(name="common_sampling", since="0.6") + @deprecated_arg(name="common_seed", since="0.6") + @deprecated_arg(name="global_prob", since="0.6") def __init__( self, keys: KeysCollection, global_prob: float = 1.0, prob: float = 0.1, - intensity_ranges: Optional[Mapping[Hashable, Sequence[Union[Sequence[float], float]]]] = None, + intensity_range: Optional[Sequence[Union[Sequence[float], float]]] = None, channel_wise: bool = True, common_sampling: bool = False, common_seed: int = 42, allow_missing_keys: bool = False, as_tensor_output: bool = True, ): - MapTransform.__init__(self, keys, allow_missing_keys) - RandomizableTransform.__init__(self, global_prob) + RandomizableTransform.__init__(self, prob=prob) + self.rand_noise = RandKSpaceSpikeNoise(prob=1.0, intensity_range=intensity_range, channel_wise=channel_wise) - self.common_sampling = common_sampling - self.common_seed = common_seed - # the spikes artifact is amplitude dependent so we instantiate one per key - self.transforms = {} - if isinstance(intensity_ranges, Mapping): - for k in self.keys: - self.transforms[k] = RandKSpaceSpikeNoise(prob, intensity_ranges[k], channel_wise) - else: - for k in self.keys: - self.transforms[k] = RandKSpaceSpikeNoise(prob, None, channel_wise) + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + super().set_random_state(seed, state) + self.rand_noise.set_random_state(seed, state) - def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: - """ - Args: - data: Expects image/label to have dimensions (C, H, W) or - (C, H, W, D), where C is the channel. 
- """ + def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) - - # In case the same spikes are desired for both image and label. - if self.common_sampling: - for k in self.keys: - self.transforms[k].set_random_state(self.common_seed) + self.randomize(None) + if not self._do_transform: + return d - for key, t in self.key_iterator(d, self.transforms): - if self._do_transform: - d[key] = self.transforms[t](d[key]) + for key in self.key_iterator(d): + d[key] = self.rand_noise(d[key], randomize=True) return d - def set_rand_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None) -> None: - """ - Set the random state locally to control the randomness. - User should use this method instead of ``set_random_state``. - Args: - seed: set the random state with an integer seed. - state: set the random state with a `np.random.RandomState` object.""" - - self.set_random_state(seed, state) - for key in self.keys: - self.transforms[key].set_random_state(seed, state) - - def _to_numpy(self, d: Union[torch.Tensor, np.ndarray]) -> np.ndarray: - if isinstance(d, torch.Tensor): - d_numpy: np.ndarray = d.cpu().detach().numpy() - return d_numpy - - -class RandCoarseDropoutd(Randomizable, MapTransform): +class RandCoarseDropoutd(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RandCoarseDropout`. 
Expect all the data specified by `keys` have same spatial shape and will randomly dropout the same regions @@ -1458,6 +1406,7 @@ def __init__( allow_missing_keys: bool = False, ): MapTransform.__init__(self, keys, allow_missing_keys) + RandomizableTransform.__init__(self, prob=prob) self.dropper = RandCoarseDropout( holes=holes, spatial_size=spatial_size, @@ -1465,31 +1414,28 @@ def __init__( fill_value=fill_value, max_holes=max_holes, max_spatial_size=max_spatial_size, - prob=prob, + prob=1.0, ) - def set_random_state( - self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None - ) -> "RandCoarseDropoutd": - self.dropper.set_random_state(seed, state) + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) - return self - - def randomize(self, img_size: Sequence[int]) -> None: - self.dropper.randomize(img_size=img_size) + self.dropper.set_random_state(seed, state) def __call__(self, data): d = dict(data) - # expect all the specified keys have same spatial shape - self.randomize(d[self.keys[0]].shape[1:]) - if self.dropper._do_transform: - for key in self.key_iterator(d): - d[key] = self.dropper(img=d[key]) + self.randomize(None) + if not self._do_transform: + return d + + # expect all the specified keys have same spatial shape and share same random holes + self.dropper.randomize(d[self.keys[0]].shape[1:]) + for key in self.key_iterator(d): + d[key] = self.dropper(img=d[key], randomize=False) return d -class RandCoarseShuffled(Randomizable, MapTransform): +class RandCoarseShuffled(RandomizableTransform, MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.RandCoarseShuffle`. 
Expect all the data specified by `keys` have same spatial shape and will randomly dropout the same regions @@ -1526,31 +1472,29 @@ def __init__( allow_missing_keys: bool = False, ): MapTransform.__init__(self, keys, allow_missing_keys) + RandomizableTransform.__init__(self, prob=prob) self.shuffle = RandCoarseShuffle( holes=holes, spatial_size=spatial_size, max_holes=max_holes, max_spatial_size=max_spatial_size, - prob=prob, + prob=1.0, ) - def set_random_state( - self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None - ) -> "RandCoarseShuffled": - self.shuffle.set_random_state(seed, state) + def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): super().set_random_state(seed, state) - return self - - def randomize(self, img_size: Sequence[int]) -> None: - self.shuffle.randomize(img_size=img_size) + self.shuffle.set_random_state(seed, state) def __call__(self, data): d = dict(data) - # expect all the specified keys have same spatial shape - self.randomize(d[self.keys[0]].shape[1:]) - if self.shuffle._do_transform: - for key in self.key_iterator(d): - d[key] = self.shuffle(img=d[key]) + self.randomize(None) + if not self._do_transform: + return d + + # expect all the specified keys have same spatial shape and share same random holes + self.shuffle.randomize(d[self.keys[0]].shape[1:]) + for key in self.key_iterator(d): + d[key] = self.shuffle(img=d[key], randomize=False) return d diff --git a/tests/test_rand_gibbs_noised.py b/tests/test_rand_gibbs_noised.py index b8bac67b81..ac5fc164e2 100644 --- a/tests/test_rand_gibbs_noised.py +++ b/tests/test_rand_gibbs_noised.py @@ -110,7 +110,7 @@ def test_alpha(self, im_shape, input_type): alpha = [0.5, 0.51] t = RandGibbsNoised(KEYS, 1.0, alpha) _ = t(deepcopy(data)) - self.assertTrue(0.5 <= t.sampled_alpha <= 0.51) + self.assertTrue(0.5 <= t.rand_gibbs_noise.sampled_alpha <= 0.51) if __name__ == "__main__": diff --git 
a/tests/test_rand_histogram_shift.py b/tests/test_rand_histogram_shift.py index fa51dacefa..e38e2ea5f8 100644 --- a/tests/test_rand_histogram_shift.py +++ b/tests/test_rand_histogram_shift.py @@ -15,7 +15,7 @@ from parameterized import parameterized from monai.transforms import RandHistogramShift -from tests.utils import TEST_NDARRAYS +from tests.utils import TEST_NDARRAYS, assert_allclose TESTS = [] for p in TEST_NDARRAYS: @@ -48,7 +48,7 @@ def test_rand_histogram_shift(self, input_param, input_data, expected_val): g = RandHistogramShift(**input_param) g.set_random_state(123) result = g(**input_data) - np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4) + assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4, type_test=False) if __name__ == "__main__": diff --git a/tests/test_rand_k_space_spike_noised.py b/tests/test_rand_k_space_spike_noised.py index 869aa50872..e41ba19a5c 100644 --- a/tests/test_rand_k_space_spike_noised.py +++ b/tests/test_rand_k_space_spike_noised.py @@ -49,18 +49,16 @@ def test_same_result(self, im_shape, im_type): data = self.get_data(im_shape, im_type) - intensity_ranges = {"image": (13, 15), "label": (13, 15)} t = RandKSpaceSpikeNoised( KEYS, - global_prob=1.0, prob=1.0, - intensity_ranges=intensity_ranges, + intensity_range=(13, 15), channel_wise=True, ) - t.set_rand_state(42) + t.set_random_state(42) out1 = t(deepcopy(data)) - t.set_rand_state(42) + t.set_random_state(42) out2 = t(deepcopy(data)) for k in KEYS: @@ -74,20 +72,18 @@ def test_same_result(self, im_shape, im_type): @parameterized.expand(TESTS) def test_0_prob(self, im_shape, im_type): data = self.get_data(im_shape, im_type) - intensity_ranges = {"image": (13, 15), "label": (13, 15)} + t1 = RandKSpaceSpikeNoised( KEYS, - global_prob=0.0, - prob=1.0, - intensity_ranges=intensity_ranges, + prob=0.0, + intensity_range=(13, 15), channel_wise=True, ) t2 = RandKSpaceSpikeNoised( KEYS, - global_prob=0.0, - prob=1.0, - intensity_ranges=intensity_ranges, + 
prob=0.0, + intensity_range=(13, 15), channel_wise=True, ) out1 = t1(data) @@ -104,52 +100,6 @@ def test_0_prob(self, im_shape, im_type): np.testing.assert_allclose(data[k], out1[k]) np.testing.assert_allclose(data[k], out2[k]) - @parameterized.expand(TESTS) - def test_intensity(self, im_shape, im_type): - - data = self.get_data(im_shape, im_type) - intensity_ranges = {"image": (13, 13.1), "label": (13, 13.1)} - t = RandKSpaceSpikeNoised( - KEYS, - global_prob=1.0, - prob=1.0, - intensity_ranges=intensity_ranges, - channel_wise=True, - ) - - _ = t(data) - self.assertGreaterEqual(t.transforms["image"].sampled_k_intensity[0], 13) - self.assertLessEqual(t.transforms["image"].sampled_k_intensity[0], 13.1) - self.assertGreaterEqual(t.transforms["label"].sampled_k_intensity[0], 13) - self.assertLessEqual(t.transforms["label"].sampled_k_intensity[0], 13.1) - - @parameterized.expand(TESTS) - def test_same_transformation(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) - # use same image for both dictionary entries to check same trans is applied to them - data = {KEYS[0]: deepcopy(data[KEYS[0]]), KEYS[1]: deepcopy(data[KEYS[0]])} - - intensity_ranges = {"image": (13, 15), "label": (13, 15)} - # use common_sampling = True to ask for the same transformation - t = RandKSpaceSpikeNoised( - KEYS, - global_prob=1.0, - prob=1.0, - intensity_ranges=intensity_ranges, - channel_wise=True, - common_sampling=True, - ) - - out = t(deepcopy(data)) - - for k in KEYS: - self.assertEqual(type(out[k]), type(data[k])) - if isinstance(out[k], torch.Tensor): - self.assertEqual(out[k].device, data[k].device) - out[k] = out[k].cpu() - - np.testing.assert_allclose(out[KEYS[0]], out[KEYS[1]]) - if __name__ == "__main__": unittest.main() diff --git a/tests/test_rand_rician_noised.py b/tests/test_rand_rician_noised.py index 010bbcb310..e8cb84dc99 100644 --- a/tests/test_rand_rician_noised.py +++ b/tests/test_rand_rician_noised.py @@ -34,6 +34,7 @@ def test_correct_results(self, _, 
in_type, keys, mean, std): noised = rician_fn({k: in_type(self.imt) for k in keys}) np.random.seed(seed) for k in keys: + # simulate the `randomize` function of transform np.random.random() _std = np.random.uniform(0, std) expected = np.sqrt( diff --git a/tests/test_rand_scale_intensityd.py b/tests/test_rand_scale_intensityd.py index 86bcb78a80..7c2392fded 100644 --- a/tests/test_rand_scale_intensityd.py +++ b/tests/test_rand_scale_intensityd.py @@ -25,7 +25,7 @@ def test_value(self): scaler.set_random_state(seed=0) result = scaler({key: p(self.imt)}) np.random.seed(0) - # simulate the randomize() of transform + # simulate the randomize function of transform np.random.random() expected = (self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32) assert_allclose(result[key], p(expected)) From 63d0c067ad26309dc6d395bb43633c021e1d2215 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 12 Oct 2021 01:31:07 +0800 Subject: [PATCH 5/7] [DLMED] fix tests Signed-off-by: Nic Ma --- monai/transforms/intensity/array.py | 4 ++-- tests/test_rand_coarse_shuffled.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index fea3b3b5a5..a98e2f9f0f 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -820,9 +820,9 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen if randomize: self.randomize() - if self.gamma_value is None: - raise RuntimeError("gamma_value is not set, please call `randomize` function first.") if self._do_transform: + if self.gamma_value is None: + raise RuntimeError("gamma_value is not set, please call `randomize` function first.") img = AdjustContrast(self.gamma_value)(img) return img diff --git a/tests/test_rand_coarse_shuffled.py b/tests/test_rand_coarse_shuffled.py index d2845fdaae..775fdae07e 100644 --- a/tests/test_rand_coarse_shuffled.py +++ 
b/tests/test_rand_coarse_shuffled.py @@ -28,17 +28,17 @@ np.asarray( [ [ - [[13, 17, 5], [6, 16, 25], [12, 15, 22]], - [[24, 7, 3], [9, 2, 23], [0, 4, 26]], - [[19, 11, 14], [1, 20, 8], [18, 10, 21]], - ] - ] + [[8, 19, 26], [24, 6, 15], [0, 13, 25]], + [[17, 3, 5], [10, 1, 12], [22, 4, 11]], + [[21, 20, 23], [14, 2, 16], [18, 9, 7]], + ], + ], ), ], [ {"keys": "img", "holes": 2, "spatial_size": 1, "max_spatial_size": -1, "prob": 1.0}, {"img": np.arange(16).reshape((2, 2, 2, 2))}, - np.asarray([[[[7, 2], [1, 4]], [[5, 0], [3, 6]]], [[[8, 13], [10, 15]], [[14, 12], [11, 9]]]]), + np.asarray([[[[6, 1], [4, 3]], [[0, 2], [7, 5]]], [[[14, 10], [9, 8]], [[12, 15], [13, 11]]]]), ], ] From 896678e7893bfa7cebb261d488511fdf03eff25c Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 12 Oct 2021 10:11:32 +0800 Subject: [PATCH 6/7] [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/transforms/intensity/array.py | 308 +++++++++++++++------------- 1 file changed, 161 insertions(+), 147 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index a98e2f9f0f..b4bd4a0fa5 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -99,9 +99,10 @@ def __init__(self, prob: float = 0.1, mean: float = 0.0, std: float = 0.1) -> No def randomize(self, img: NdarrayOrTensor, mean: Optional[float] = None) -> None: super().randomize(None) - if self._do_transform: - rand_std = self.R.uniform(0, self.std) - self.noise = self.R.normal(self.mean if mean is None else mean, rand_std, size=img.shape) + if not self._do_transform: + return None + rand_std = self.R.uniform(0, self.std) + self.noise = self.R.normal(self.mean if mean is None else mean, rand_std, size=img.shape) def __call__(self, img: NdarrayOrTensor, mean: Optional[float] = None, randomize: bool = True) -> NdarrayOrTensor: """ @@ -110,13 +111,13 @@ def __call__(self, img: NdarrayOrTensor, mean: Optional[float] = None, randomize if randomize: 
self.randomize(img=img, mean=self.mean if mean is None else mean) - if self._do_transform: - if self.noise is None: - raise RuntimeError("please call the `randomize()` function first.") - noise, *_ = convert_to_dst_type(self.noise, img) - img = img + noise + if not self._do_transform: + return img - return img + if self.noise is None: + raise RuntimeError("please call the `randomize()` function first.") + noise, *_ = convert_to_dst_type(self.noise, img) + return img + noise class RandRicianNoise(RandomizableTransform): @@ -183,22 +184,23 @@ def __call__(self, img: NdarrayTensor, randomize: bool = True) -> NdarrayTensor: if randomize: super().randomize(None) - if self._do_transform: - if self.channel_wise: - _mean = ensure_tuple_rep(self.mean, len(img)) - _std = ensure_tuple_rep(self.std, len(img)) - for i, d in enumerate(img): - img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i]) - else: - if not isinstance(self.mean, (int, float)): - raise RuntimeError("If channel_wise is False, mean must be a float or int number.") - if not isinstance(self.std, (int, float)): - raise RuntimeError("If channel_wise is False, std must be a float or int number.") - std = self.std * img.std() if self.relative else self.std - if not isinstance(std, (int, float)): - raise RuntimeError("std must be a float or int number.") - img = self._add_noise(img, mean=self.mean, std=std) + if not self._do_transform: + return img + if self.channel_wise: + _mean = ensure_tuple_rep(self.mean, len(img)) + _std = ensure_tuple_rep(self.std, len(img)) + for i, d in enumerate(img): + img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i]) + else: + if not isinstance(self.mean, (int, float)): + raise RuntimeError("If channel_wise is False, mean must be a float or int number.") + if not isinstance(self.std, (int, float)): + raise RuntimeError("If channel_wise is False, std must be a float or int number.") + std = self.std * 
img.std() if self.relative else self.std + if not isinstance(std, (int, float)): + raise RuntimeError("std must be a float or int number.") + img = self._add_noise(img, mean=self.mean, std=std) return img @@ -253,8 +255,9 @@ def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1 def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) + if not self._do_transform: + return None + self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1]) def __call__( self, @@ -274,10 +277,10 @@ def __call__( if randomize: self.randomize() - if self._do_transform: - img = self._shfiter(img, self._offset if factor is None else self._offset * factor) + if not self._do_transform: + return img - return img + return self._shfiter(img, self._offset if factor is None else self._offset * factor) class StdShiftIntensity(Transform): @@ -374,8 +377,9 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + if not self._do_transform: + return None + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ @@ -384,13 +388,13 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen if randomize: self.randomize() - if self._do_transform: - shifter = StdShiftIntensity( - factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype - ) - img = shifter(img=img) + if not self._do_transform: + return img - return img + shifter = StdShiftIntensity( + factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype + ) + return shifter(img=img) class ScaleIntensity(Transform): @@ -480,8 +484,9 @@ def __init__( def 
randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) + if not self._do_transform: + return None + self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ @@ -490,10 +495,10 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen if randomize: self.randomize() - if self._do_transform: - img = ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img) + if not self._do_transform: + return img - return img + return ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img) class RandBiasField(RandomizableTransform): @@ -557,9 +562,10 @@ def _generate_random_field(self, spatial_shape: Sequence[int], degree: int, coef def randomize(self, data: np.ndarray) -> None: super().randomize(None) - if self._do_transform: - n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)])) - self._coeff = self.R.uniform(*self.coeff_range, n_coeff).tolist() + if not self._do_transform: + return None + n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)])) + self._coeff = self.R.uniform(*self.coeff_range, n_coeff).tolist() def __call__(self, img: np.ndarray, randomize: bool = True): """ @@ -568,19 +574,19 @@ def __call__(self, img: np.ndarray, randomize: bool = True): if randomize: self.randomize(data=img) - if self._do_transform: - img, *_ = convert_data_type(img, np.ndarray) # type: ignore - num_channels, *spatial_shape = img.shape - _bias_fields = np.stack( - [ - self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff) - for _ in range(num_channels) - ], - axis=0, - ) - img = (img * np.exp(_bias_fields)).astype(self.dtype) + if not self._do_transform: + return img - return img + img, *_ = 
convert_data_type(img, np.ndarray) # type: ignore + num_channels, *spatial_shape = img.shape + _bias_fields = np.stack( + [ + self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff) + for _ in range(num_channels) + ], + axis=0, + ) + return (img * np.exp(_bias_fields)).astype(self.dtype) class NormalizeIntensity(Transform): @@ -810,8 +816,9 @@ def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0. def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) + if not self._do_transform: + return None + self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: """ @@ -820,12 +827,12 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen if randomize: self.randomize() - if self._do_transform: - if self.gamma_value is None: - raise RuntimeError("gamma_value is not set, please call `randomize` function first.") - img = AdjustContrast(self.gamma_value)(img) + if not self._do_transform: + return img - return img + if self.gamma_value is None: + raise RuntimeError("gamma_value is not set, please call `randomize` function first.") + return AdjustContrast(self.gamma_value)(img) class ScaleIntensityRangePercentiles(Transform): @@ -1130,20 +1137,21 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) - self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) - self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) + if not self._do_transform: + return None + self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1]) + self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) + self.z = 
self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: if randomize: self.randomize() - if self._do_transform: - sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1) - img = GaussianSmooth(sigma=sigma, approx=self.approx)(img) + if not self._do_transform: + return img - return img + sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1) + return GaussianSmooth(sigma=sigma, approx=self.approx)(img) class GaussianSharpen(Transform): @@ -1257,30 +1265,31 @@ def __init__( def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) - self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) - self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) - sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x - sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y - sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z - self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1]) - self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1]) - self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) - self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) + if not self._do_transform: + return None + self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1]) + self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1]) + self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1]) + sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x + sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y + sigma2_z = (self.sigma2_z, self.z1) if not 
isinstance(self.sigma2_z, Iterable) else self.sigma2_z + self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1]) + self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1]) + self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) + self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: if randomize: self.randomize() - if self._do_transform: - if self.x2 is None or self.y2 is None or self.z2 is None or self.a is None: - raise RuntimeError("please call the `randomize()` function first.") - sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1) - sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1) - img = GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img) + if not self._do_transform: + return img - return img + if self.x2 is None or self.y2 is None or self.z2 is None or self.a is None: + raise RuntimeError("please call the `randomize()` function first.") + sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1) + sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1) + return GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img) class RandHistogramShift(RandomizableTransform): @@ -1314,32 +1323,34 @@ def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: f def randomize(self, data: Optional[Any] = None) -> None: super().randomize(None) - if self._do_transform: - num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) - self.reference_control_points = np.linspace(0, 1, num_control_point) - self.floating_control_points = np.copy(self.reference_control_points) - for i in range(1, num_control_point - 1): - self.floating_control_points[i] = self.R.uniform( - self.floating_control_points[i - 1], self.floating_control_points[i + 1] - ) + if not 
self._do_transform: + return None + num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1) + self.reference_control_points = np.linspace(0, 1, num_control_point) + self.floating_control_points = np.copy(self.reference_control_points) + for i in range(1, num_control_point - 1): + self.floating_control_points[i] = self.R.uniform( + self.floating_control_points[i - 1], self.floating_control_points[i + 1] + ) def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: if randomize: self.randomize() - if self._do_transform: - if self.reference_control_points is None or self.floating_control_points is None: - raise RuntimeError("please call the `randomize()` function first.") - img_np: np.ndarray - img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore - img_min, img_max = img_np.min(), img_np.max() - reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min - floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min - img_np = np.asarray( - np.interp(img_np, reference_control_points_scaled, floating_control_points_scaled), dtype=img_np.dtype - ) - img, *_ = convert_to_dst_type(img_np, dst=img) + if not self._do_transform: + return img + if self.reference_control_points is None or self.floating_control_points is None: + raise RuntimeError("please call the `randomize()` function first.") + img_np: np.ndarray + img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore + img_min, img_max = img_np.min(), img_np.max() + reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min + floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min + img_np = np.asarray( + np.interp(img_np, reference_control_points_scaled, floating_control_points_scaled), dtype=img_np.dtype + ) + img, *_ = convert_to_dst_type(img_np, dst=img) return img @@ -1461,18 +1472,19 @@ def 
randomize(self, data: Any) -> None: (2) Get alpha from uniform distribution. """ super().randomize(None) - if self._do_transform: - self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1]) + if not self._do_transform: + return None + self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1]) - def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True): if randomize: # randomize application and possibly alpha self.randomize(None) - if self._do_transform: - img = GibbsNoise(self.sampled_alpha)(img) + if not self._do_transform: + return img - return img + return GibbsNoise(self.sampled_alpha)(img) class KSpaceSpikeNoise(Transform, Fourier): @@ -1665,7 +1677,7 @@ def __init__( super().__init__(prob) - def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor: + def __call__(self, img: NdarrayOrTensor, randomize: bool = True): """ Apply transform to `img`. Assumes data is in channel-first form. @@ -1689,10 +1701,10 @@ def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTen intensity_range = self._make_sequence(img) self.randomize(img, intensity_range) - if self._do_transform: - img = KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity)(img) + if not self._do_transform: + return img - return img + return KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity)(img) def randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[float]]) -> None: # type: ignore """ @@ -1705,20 +1717,21 @@ def randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[flo intensity for each channel depending on ``self._do_transform``. 
""" super().randomize(None) - if self._do_transform: - if self.channel_wise: - # randomizing per channel - for i, chan in enumerate(img): - self.sampled_locs.append((i,) + tuple(self.R.randint(0, k) for k in chan.shape)) - self.sampled_k_intensity.append(self.R.uniform(intensity_range[i][0], intensity_range[i][1])) + if not self._do_transform: + return None + if self.channel_wise: + # randomizing per channel + for i, chan in enumerate(img): + self.sampled_locs.append((i,) + tuple(self.R.randint(0, k) for k in chan.shape)) + self.sampled_k_intensity.append(self.R.uniform(intensity_range[i][0], intensity_range[i][1])) + else: + # working with all channels together + spatial = tuple(self.R.randint(0, k) for k in img.shape[1:]) + self.sampled_locs = [(i,) + spatial for i in range(img.shape[0])] + if isinstance(intensity_range[0], Sequence): + self.sampled_k_intensity = [self.R.uniform(p[0], p[1]) for p in intensity_range] else: - # working with all channels together - spatial = tuple(self.R.randint(0, k) for k in img.shape[1:]) - self.sampled_locs = [(i,) + spatial for i in range(img.shape[0])] - if isinstance(intensity_range[0], Sequence): - self.sampled_k_intensity = [self.R.uniform(p[0], p[1]) for p in intensity_range] - else: - self.sampled_k_intensity = [self.R.uniform(intensity_range[0], intensity_range[1])] * len(img) + self.sampled_k_intensity = [self.R.uniform(intensity_range[0], intensity_range[1])] * len(img) def _make_sequence(self, x: NdarrayOrTensor) -> Sequence[Sequence[float]]: """ @@ -1790,16 +1803,17 @@ def __init__( def randomize(self, img_size: Sequence[int]) -> None: super().randomize(None) - if self._do_transform: - size = fall_back_tuple(self.spatial_size, img_size) - self.hole_coords = [] # clear previously computed coords - num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1) - for _ in range(num_holes): - if self.max_spatial_size is not None: - max_size = fall_back_tuple(self.max_spatial_size, 
img_size) - size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size))) - valid_size = get_valid_patch_size(img_size, size) - self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R)) + if not self._do_transform: + return None + size = fall_back_tuple(self.spatial_size, img_size) + self.hole_coords = [] # clear previously computed coords + num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1) + for _ in range(num_holes): + if self.max_spatial_size is not None: + max_size = fall_back_tuple(self.max_spatial_size, img_size) + size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size))) + valid_size = get_valid_patch_size(img_size, size) + self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R)) @abstractmethod def _transform_holes(self, img: np.ndarray) -> np.ndarray: @@ -1813,11 +1827,11 @@ def __call__(self, img: np.ndarray, randomize: bool = True): if randomize: self.randomize(img.shape[1:]) - if self._do_transform: - img, *_ = convert_data_type(img, np.ndarray) # type: ignore - img = self._transform_holes(img=img) + if not self._do_transform: + return img - return img + img, *_ = convert_data_type(img, np.ndarray) # type: ignore + return self._transform_holes(img=img) class RandCoarseDropout(RandCoarseTransform): From 14e785f0988ec6b422f52aacf18b1fdd87a94ba1 Mon Sep 17 00:00:00 2001 From: Nic Ma Date: Tue, 12 Oct 2021 19:42:31 +0800 Subject: [PATCH 7/7] [DLMED] update according to comments Signed-off-by: Nic Ma --- monai/transforms/intensity/dictionary.py | 84 ++++++++++++++++++------ 1 file changed, 63 insertions(+), 21 deletions(-) diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 26b8ac6447..6891d69bb8 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -171,13 +171,16 @@ def 
__init__( RandomizableTransform.__init__(self, prob) self.rand_gaussian_noise = RandGaussianNoise(mean=mean, std=std, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandGaussianNoised": super().set_random_state(seed, state) self.rand_gaussian_noise.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -238,13 +241,16 @@ def __init__( sample_std=sample_std, ) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandRicianNoised": super().set_random_state(seed, state) self.rand_rician_noise.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -361,13 +367,16 @@ def __init__( self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys)) self.shifter = RandShiftIntensity(offsets=offsets, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandShiftIntensityd": super().set_random_state(seed, state) self.shifter.set_random_state(seed, state) + return self def __call__(self, data) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -458,13 +467,16 @@ def __init__( prob=1.0, ) - def 
set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandStdShiftIntensityd": super().set_random_state(seed, state) self.shifter.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -549,13 +561,16 @@ def __init__( RandomizableTransform.__init__(self, prob) self.scaler = RandScaleIntensity(factors=factors, dtype=dtype, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandScaleIntensityd": super().set_random_state(seed, state) self.scaler.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -597,13 +612,16 @@ def __init__( self.rand_bias_field = RandBiasField(degree=degree, coeff_range=coeff_range, dtype=dtype, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandBiasFieldd": super().set_random_state(seed, state) self.rand_bias_field.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -780,13 +798,16 @@ def __init__( RandomizableTransform.__init__(self, prob) self.adjuster = 
RandAdjustContrast(gamma=gamma, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandAdjustContrastd": super().set_random_state(seed, state) self.adjuster.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) - super().randomize(None) + self.randomize(None) if not self._do_transform: return d @@ -950,9 +971,12 @@ def __init__( prob=1.0, ) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandGaussianSmoothd": super().set_random_state(seed, state) self.rand_smooth.set_random_state(seed, state) + return self def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -1062,9 +1086,12 @@ def __init__( prob=1.0, ) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandGaussianSharpend": super().set_random_state(seed, state) self.rand_sharpen.set_random_state(seed, state) + return self def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -1107,9 +1134,12 @@ def __init__( RandomizableTransform.__init__(self, prob) self.shifter = RandHistogramShift(num_control_points=num_control_points, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandHistogramShiftd": 
super().set_random_state(seed, state) self.shifter.set_random_state(seed, state) + return self def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -1165,9 +1195,12 @@ def __init__( RandomizableTransform.__init__(self, prob=prob) self.rand_gibbs_noise = RandGibbsNoise(alpha=alpha, prob=1.0) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandGibbsNoised": super().set_random_state(seed, state) self.rand_gibbs_noise.set_random_state(seed, state) + return self def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -1346,9 +1379,12 @@ def __init__( RandomizableTransform.__init__(self, prob=prob) self.rand_noise = RandKSpaceSpikeNoise(prob=1.0, intensity_range=intensity_range, channel_wise=channel_wise) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandKSpaceSpikeNoised": super().set_random_state(seed, state) self.rand_noise.set_random_state(seed, state) + return self def __call__(self, data: Dict[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]: d = dict(data) @@ -1417,9 +1453,12 @@ def __init__( prob=1.0, ) - def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandCoarseDropoutd": super().set_random_state(seed, state) self.dropper.set_random_state(seed, state) + return self def __call__(self, data): d = dict(data) @@ -1481,9 +1520,12 @@ def __init__( prob=1.0, ) - def set_random_state(self, seed: Optional[int] = None, state: 
Optional[np.random.RandomState] = None): + def set_random_state( + self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None + ) -> "RandCoarseShuffled": super().set_random_state(seed, state) self.shuffle.set_random_state(seed, state) + return self def __call__(self, data): d = dict(data)