Commit

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
KumoLiu committed May 27, 2024
1 parent e5afa43 commit dcfeb59
Showing 2 changed files with 66 additions and 61 deletions.
75 changes: 44 additions & 31 deletions tests/test_clip_intensity_percentiles.py
@@ -18,73 +18,91 @@
from monai.transforms import ClipIntensityPercentiles
from monai.transforms.utils import soft_clip
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
+from monai.utils.type_conversion import convert_to_tensor
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose


+def test_hard_clip_func(im, lower, upper):
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return clip(im_t, lower, upper)
+
+def test_soft_clip_func(im, lower, upper):
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return soft_clip(im_t, minv=lower, maxv=upper, sharpness_factor=1.0, dtype=torch.float32)
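
The two helpers above funnel every test through the same tensor-based expectation: convert_to_tensor first, then percentile thresholds, then a hard clip or soft_clip. The rtol comments below concern soft_clip, which is built on softplus evaluated through logaddexp. As a rough self-contained sketch of that behavior (an assumption about the shape of the formula, not MONAI's exact code):

import torch

def _softplus(x: torch.Tensor, sharpness: float) -> torch.Tensor:
    # scaled softplus log(1 + exp(sharpness * x)) / sharpness, computed stably
    # via logaddexp -- the op whose torch/numpy drift motivates rtol=1e-4
    return torch.logaddexp(torch.zeros_like(x), sharpness * x) / sharpness

def soft_clip_sketch(x: torch.Tensor, minv=None, maxv=None, sharpness_factor: float = 1.0) -> torch.Tensor:
    # hypothetical stand-in for monai.transforms.utils.soft_clip
    x = x.to(torch.float32)
    if minv is not None and maxv is not None:
        return minv + _softplus(x - minv, sharpness_factor) - _softplus(x - maxv, sharpness_factor)
    if maxv is not None:  # one-sided high: values above maxv bend toward maxv
        return x - _softplus(x - maxv, sharpness_factor)
    if minv is not None:  # one-sided low: values below minv bend toward minv
        return minv + _softplus(x - minv, sharpness_factor)
    return x

Well inside the band this is close to the identity; outside it saturates smoothly at the thresholds, which is why the tests compare against a tolerance rather than exact equality.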


class TestClipIntensityPercentiles2D(NumpyImageTestCase2D):

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_two_sided(self, p):
hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_one_sided_high(self, p):
soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_one_sided_low(self, p):
soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_channel_wise(self, p):
clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
im = p(self.imt)
result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
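
For reference, the channel_wise expectation in the loop above amounts to clipping each channel at its own 5th/95th percentiles; a minimal sketch with a hypothetical channel-first image:

import torch
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
from monai.utils.type_conversion import convert_to_tensor

img = torch.rand(3, 64, 64)  # hypothetical 3-channel image
im_t = convert_to_tensor(img)
expected = torch.stack([clip(c, *percentile(c, (5, 95))) for c in im_t])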
@@ -117,35 +135,31 @@ def test_hard_clipping_two_sided(self, p):
hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_high(self, p):
hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_hard_clipping_one_sided_low(self, p):
hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
im = p(self.imt)
result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_two_sided(self, p):
soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, 95)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -154,27 +168,26 @@ def test_soft_clipping_one_sided_high(self, p):
soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_soft_clipping_one_sided_low(self, p):
soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
def test_channel_wise(self, p):
clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
im = p(self.imt)
result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
52 changes: 22 additions & 30 deletions tests/test_clip_intensity_percentilesd.py
@@ -19,7 +19,9 @@
from monai.transforms import ClipIntensityPercentilesd
from monai.transforms.utils import soft_clip
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
+from monai.utils.type_conversion import convert_to_tensor
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose
+from .test_clip_intensity_percentiles import test_hard_clip_func, test_soft_clip_func


class TestClipIntensityPercentilesd2D(NumpyImageTestCase2D):
@@ -30,8 +32,7 @@ def test_hard_clipping_two_sided(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -40,8 +41,7 @@ def test_hard_clipping_one_sided_high(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -50,8 +50,7 @@ def test_hard_clipping_one_sided_low(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -60,9 +59,8 @@ def test_soft_clipping_two_sided(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -71,9 +69,8 @@ def test_soft_clipping_one_sided_high(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -82,9 +79,8 @@ def test_soft_clipping_one_sided_low(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -93,7 +89,8 @@ def test_channel_wise(self, p):
clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True)
im = p(self.imt)
result = clipper({key: im})
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-3, atol=0)
@@ -132,8 +129,7 @@ def test_hard_clipping_two_sided(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -142,8 +138,7 @@ def test_hard_clipping_one_sided_high(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -152,8 +147,7 @@ def test_hard_clipping_one_sided_low(self, p):
hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5)
im = p(self.imt)
result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -162,8 +156,7 @@ def test_soft_clipping_two_sided(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, 95)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -173,9 +166,8 @@ def test_soft_clipping_one_sided_high(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -184,8 +176,7 @@ def test_soft_clipping_one_sided_low(self, p):
soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0)
im = p(self.imt)
result = soft_clipper({key: im})
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, None)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -195,7 +186,8 @@ def test_channel_wise(self, p):
clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True)
im = p(self.imt)
result = clipper({key: im})
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(im)
+        for i, c in enumerate(im_t):
lower, upper = percentile(c, (5, 95))
expected = clip(c, lower, upper)
assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
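
For context, a typical call of the dictionary-based transform exercised above looks like the following (hypothetical input; the constructor arguments mirror the tests):

import numpy as np
from monai.transforms import ClipIntensityPercentilesd

data = {"img": np.random.rand(1, 64, 64).astype(np.float32)}  # hypothetical sample
soft_clipper = ClipIntensityPercentilesd(keys=["img"], lower=5, upper=95, sharpness_factor=1.0)
out = soft_clipper(data)["img"]  # intensities softly compressed into the 5th-95th percentile band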
