diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index dc684f726a..3544dd2c41 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -1008,6 +1008,7 @@ def __init__(self, name: str, *args, **kwargs) -> None:
 
         """
         super().__init__()
+        self.name = name
         transform, _ = optional_import("torchvision.transforms", "0.8.0", min_version, name=name)
         self.trans = transform(*args, **kwargs)
 
@@ -1196,6 +1197,7 @@ class CuCIM(Transform):
 
     def __init__(self, name: str, *args, **kwargs) -> None:
         super().__init__()
+        self.name = name
         self.transform, _ = optional_import("cucim.core.operations.expose.transform", name=name)
         self.args = args
         self.kwargs = kwargs
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index a15d5368fa..447fdc6f17 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -1325,6 +1325,7 @@ def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = F
 
         """
         super().__init__(keys, allow_missing_keys)
+        self.name = name
         self.trans = TorchVision(name, *args, **kwargs)
 
     def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
@@ -1364,6 +1365,7 @@ def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = F
 
         """
         MapTransform.__init__(self, keys, allow_missing_keys)
+        self.name = name
         self.trans = TorchVision(name, *args, **kwargs)
 
     def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
@@ -1525,6 +1527,7 @@ class CuCIMd(MapTransform):
 
     def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None:
         super().__init__(keys=keys, allow_missing_keys=allow_missing_keys)
+        self.name = name
         self.trans = CuCIM(name, *args, **kwargs)
 
     def __call__(self, data):
diff --git a/monai/utils/nvtx.py b/monai/utils/nvtx.py
index 0693d6fe5b..81a8e8321b 100644
--- a/monai/utils/nvtx.py
+++ b/monai/utils/nvtx.py
@@ -62,8 +62,15 @@ def __call__(self, obj: Any):
         # Define the name to be associated to the range if not provided
         if self.name is None:
             name = type(obj).__name__
+            # If CuCIM or TorchVision transform wrappers are being used,
+            # append the underlying transform to the name for more clarity
+            if "CuCIM" in name or "TorchVision" in name:
+                name = f"{name}_{obj.name}"
             self.name_counter[name] += 1
-            self.name = f"{name}_{self.name_counter[name]}"
+            if self.name_counter[name] > 1:
+                self.name = f"{name}_{self.name_counter[name]}"
+            else:
+                self.name = name
 
         # Define the methods to be wrapped if not provided
         if self.methods is None:
diff --git a/tests/test_nvtx_decorator.py b/tests/test_nvtx_decorator.py
index 0955fbb712..6effad5fed 100644
--- a/tests/test_nvtx_decorator.py
+++ b/tests/test_nvtx_decorator.py
@@ -17,18 +17,25 @@
 from monai.transforms import (
     Compose,
+    CuCIM,
     Flip,
     FlipD,
     RandAdjustContrast,
+    RandCuCIM,
     RandFlip,
     Randomizable,
     Rotate90,
+    ToCupy,
+    TorchVision,
     ToTensor,
     ToTensorD,
 )
 from monai.utils import Range, optional_import
 
 _, has_nvtx = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. Are you sure you have a CUDA build?")
+_, has_cp = optional_import("cupy")
+_, has_tvt = optional_import("torchvision.transforms")
+_, has_cut = optional_import("cucim.core.operations.expose.transform")
 
 TEST_CASE_ARRAY_0 = [np.random.randn(3, 3)]
@@ -40,10 +47,12 @@
 TEST_CASE_TORCH_0 = [torch.randn(3, 3)]
 TEST_CASE_TORCH_1 = [torch.randn(3, 10, 10)]
+TEST_CASE_WRAPPER = [np.random.randn(3, 10, 10)]
+
 
+@unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
 class TestNVTXRangeDecorator(unittest.TestCase):
     @parameterized.expand([TEST_CASE_ARRAY_0, TEST_CASE_ARRAY_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_array(self, input):
         transforms = Compose([Range("random flip")(Flip()), Range()(ToTensor())])
         # Apply transforms
@@ -65,11 +74,10 @@ def test_tranform_array(self, input):
         self.assertIsInstance(output2, torch.Tensor)
         self.assertIsInstance(output3, torch.Tensor)
         np.testing.assert_equal(output.numpy(), output1.numpy())
-        np.testing.assert_equal(output.numpy(), output1.numpy())
+        np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
     @parameterized.expand([TEST_CASE_DICT_0, TEST_CASE_DICT_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_dict(self, input):
         transforms = Compose([Range("random flip dict")(FlipD(keys="image")), Range()(ToTensorD("image"))])
         # Apply transforms
@@ -94,8 +102,32 @@ def test_tranform_dict(self, input):
         np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
+    @parameterized.expand([TEST_CASE_WRAPPER])
+    @unittest.skipUnless(has_cp, "Requires CuPy.")
+    @unittest.skipUnless(has_cut, "Requires cuCIM transforms.")
+    @unittest.skipUnless(has_tvt, "Requires torchvision transforms.")
+    def test_wrapper_tranforms(self, input):
+        transform_list = [
+            ToTensor(),
+            TorchVision(name="RandomHorizontalFlip", p=1.0),
+            ToCupy(),
+            CuCIM(name="image_flip", spatial_axis=-1),
+            RandCuCIM(name="rand_image_rotate_90", prob=1.0, max_k=1, spatial_axis=(-2, -1)),
+        ]
+
+        transforms = Compose(transform_list)
+        transforms_range = Compose([Range()(t) for t in transform_list])
+
+        # Apply transforms
+        output = transforms(input)
+
+        # Apply transforms with Range
+        output_r = transforms_range(input)
+
+        # Check the outputs
+        np.testing.assert_equal(output.get(), output_r.get())
+
     @parameterized.expand([TEST_CASE_ARRAY_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_randomized(self, input):
         # Compose deterministic and randomized transforms
         transforms = Compose(
@@ -136,7 +168,6 @@ def test_tranform_randomized(self, input):
                 break
 
     @parameterized.expand([TEST_CASE_TORCH_0, TEST_CASE_TORCH_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_network(self, input):
         # Create a network
         model = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Sigmoid())
@@ -164,7 +195,6 @@ def test_network(self, input):
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
     @parameterized.expand([TEST_CASE_TORCH_0, TEST_CASE_TORCH_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_loss(self, input):
         # Create a network and loss
         model = torch.nn.Sigmoid()
@@ -194,7 +224,6 @@ def test_loss(self, input):
         np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_context_manager(self):
         model = torch.nn.Sigmoid()
         loss = torch.nn.BCELoss()
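
A minimal usage sketch of the naming behavior introduced above (assumes NVTX and torchvision are available; the wrapped transform is illustrative and not part of the patch):

    from monai.transforms import Compose, Flip, TorchVision, ToTensor
    from monai.utils import Range

    # With this patch, the first unnamed Range over a transform keeps the bare
    # class name ("Flip", "ToTensor"); a numeric suffix ("Flip_2") is appended
    # only when the same class is wrapped again.
    transforms = Compose([Range()(Flip()), Range()(ToTensor())])

    # Wrapper transforms now store self.name, so the range below is labeled
    # "TorchVision_RandomHorizontalFlip" rather than just "TorchVision".
    flip = Range()(TorchVision(name="RandomHorizontalFlip", p=1.0))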