2 changes: 2 additions & 0 deletions monai/transforms/utility/array.py
@@ -1008,6 +1008,7 @@ def __init__(self, name: str, *args, **kwargs) -> None:
 
         """
         super().__init__()
+        self.name = name
         transform, _ = optional_import("torchvision.transforms", "0.8.0", min_version, name=name)
         self.trans = transform(*args, **kwargs)
 
@@ -1196,6 +1197,7 @@ class CuCIM(Transform):
 
     def __init__(self, name: str, *args, **kwargs) -> None:
         super().__init__()
+        self.name = name
         self.transform, _ = optional_import("cucim.core.operations.expose.transform", name=name)
         self.args = args
         self.kwargs = kwargs
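Both hunks above follow the same pattern: the wrapper now records the name of the backend transform it was built around, so downstream tooling (the NVTX Range change below) can read it back. A minimal sketch of the effect, assuming torchvision is installed ("RandomHorizontalFlip" is only an example transform, not part of this PR):

    from monai.transforms import TorchVision

    # The wrapper keeps the backend transform's name on the instance.
    flip = TorchVision(name="RandomHorizontalFlip", p=1.0)
    print(flip.name)  # -> "RandomHorizontalFlip"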
3 changes: 3 additions & 0 deletions monai/transforms/utility/dictionary.py
@@ -1325,6 +1325,7 @@ def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = F
 
         """
         super().__init__(keys, allow_missing_keys)
+        self.name = name
         self.trans = TorchVision(name, *args, **kwargs)
 
     def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
@@ -1364,6 +1365,7 @@ def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = F
 
         """
         MapTransform.__init__(self, keys, allow_missing_keys)
+        self.name = name
         self.trans = TorchVision(name, *args, **kwargs)
 
     def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
@@ -1525,6 +1527,7 @@ class CuCIMd(MapTransform):
 
     def __init__(self, keys: KeysCollection, name: str, allow_missing_keys: bool = False, *args, **kwargs) -> None:
         super().__init__(keys=keys, allow_missing_keys=allow_missing_keys)
+        self.name = name
         self.trans = CuCIM(name, *args, **kwargs)
 
     def __call__(self, data):
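The dictionary-based wrappers mirror the array versions and store the same attribute. A short sketch (the "image" key and the transform choice are illustrative only):

    from monai.transforms import TorchVisiond

    flipd = TorchVisiond(keys="image", name="RandomHorizontalFlip", p=1.0)
    print(flipd.name)        # -> "RandomHorizontalFlip"
    print(flipd.trans.name)  # the wrapped array-version TorchVision records it too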
9 changes: 8 additions & 1 deletion monai/utils/nvtx.py
@@ -62,8 +62,15 @@ def __call__(self, obj: Any):
         # Define the name to be associated to the range if not provided
         if self.name is None:
             name = type(obj).__name__
+            # If CuCIM or TorchVision transform wrappers are being used,
+            # append the underlying transform to the name for more clarity
+            if "CuCIM" in name or "TorchVision" in name:
+                name = f"{name}_{obj.name}"
             self.name_counter[name] += 1
-            self.name = f"{name}_{self.name_counter[name]}"
+            if self.name_counter[name] > 1:
+                self.name = f"{name}_{self.name_counter[name]}"
+            else:
+                self.name = name
 
         # Define the methods to be wrapped if not provided
         if self.methods is None:
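To make the naming rule concrete, here is a standalone sketch of the same logic in plain Python (not MONAI code): wrapper types contribute the underlying transform's name via the new obj.name attribute, the first range with a given name stays bare, and repeats get a numeric suffix.

    from collections import defaultdict

    name_counter = defaultdict(int)

    def range_name(type_name, wrapped_name=None):
        # Mirror of the default naming in Range.__call__ above.
        name = f"{type_name}_{wrapped_name}" if wrapped_name else type_name
        name_counter[name] += 1
        return f"{name}_{name_counter[name]}" if name_counter[name] > 1 else name

    print(range_name("Flip"))                                 # Flip
    print(range_name("Flip"))                                 # Flip_2
    print(range_name("TorchVision", "RandomHorizontalFlip"))  # TorchVision_RandomHorizontalFlip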
43 changes: 36 additions & 7 deletions tests/test_nvtx_decorator.py
@@ -17,18 +17,25 @@
 
 from monai.transforms import (
     Compose,
+    CuCIM,
     Flip,
     FlipD,
     RandAdjustContrast,
+    RandCuCIM,
     RandFlip,
     Randomizable,
     Rotate90,
+    ToCupy,
+    TorchVision,
     ToTensor,
     ToTensorD,
 )
 from monai.utils import Range, optional_import
 
 _, has_nvtx = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. Are you sure you have a CUDA build?")
+_, has_cp = optional_import("cupy")
+_, has_tvt = optional_import("torchvision.transforms")
+_, has_cut = optional_import("cucim.core.operations.expose.transform")
 
 
 TEST_CASE_ARRAY_0 = [np.random.randn(3, 3)]
@@ -40,10 +47,12 @@
 TEST_CASE_TORCH_0 = [torch.randn(3, 3)]
 TEST_CASE_TORCH_1 = [torch.randn(3, 10, 10)]
 
+TEST_CASE_WRAPPER = [np.random.randn(3, 10, 10)]
+
 
+@unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
 class TestNVTXRangeDecorator(unittest.TestCase):
     @parameterized.expand([TEST_CASE_ARRAY_0, TEST_CASE_ARRAY_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_array(self, input):
         transforms = Compose([Range("random flip")(Flip()), Range()(ToTensor())])
         # Apply transforms
@@ -65,11 +74,10 @@ def test_tranform_array(self, input):
         self.assertIsInstance(output2, torch.Tensor)
         self.assertIsInstance(output3, torch.Tensor)
         np.testing.assert_equal(output.numpy(), output1.numpy())
-        np.testing.assert_equal(output.numpy(), output1.numpy())
+        np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
     @parameterized.expand([TEST_CASE_DICT_0, TEST_CASE_DICT_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_dict(self, input):
         transforms = Compose([Range("random flip dict")(FlipD(keys="image")), Range()(ToTensorD("image"))])
         # Apply transforms
@@ -94,8 +102,32 @@ def test_tranform_dict(self, input):
         np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
+    @parameterized.expand([TEST_CASE_WRAPPER])
+    @unittest.skipUnless(has_cp, "Requires CuPy.")
+    @unittest.skipUnless(has_cut, "Requires cuCIM transforms.")
+    @unittest.skipUnless(has_tvt, "Requires torchvision transforms.")
+    def test_wrapper_tranforms(self, input):
+        transform_list = [
+            ToTensor(),
+            TorchVision(name="RandomHorizontalFlip", p=1.0),
+            ToCupy(),
+            CuCIM(name="image_flip", spatial_axis=-1),
+            RandCuCIM(name="rand_image_rotate_90", prob=1.0, max_k=1, spatial_axis=(-2, -1)),
+        ]
+
+        transforms = Compose(transform_list)
+        transforms_range = Compose([Range()(t) for t in transform_list])
+
+        # Apply transforms
+        output = transforms(input)
+
+        # Apply transforms with Range
+        output_r = transforms_range(input)
+
+        # Check the outputs
+        np.testing.assert_equal(output.get(), output_r.get())
+
     @parameterized.expand([TEST_CASE_ARRAY_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_tranform_randomized(self, input):
         # Compose deterministic and randomized transforms
         transforms = Compose(
@@ -136,7 +168,6 @@ def test_tranform_randomized(self, input):
                 break
 
     @parameterized.expand([TEST_CASE_TORCH_0, TEST_CASE_TORCH_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_network(self, input):
         # Create a network
         model = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Sigmoid())
@@ -164,7 +195,6 @@ def test_network(self, input):
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
     @parameterized.expand([TEST_CASE_TORCH_0, TEST_CASE_TORCH_1])
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_loss(self, input):
         # Create a network and loss
         model = torch.nn.Sigmoid()
@@ -194,7 +224,6 @@ def test_loss(self, input):
         np.testing.assert_equal(output.numpy(), output2.numpy())
         np.testing.assert_equal(output.numpy(), output3.numpy())
 
-    @unittest.skipUnless(has_nvtx, "CUDA is required for NVTX Range!")
     def test_context_manager(self):
         model = torch.nn.Sigmoid()
         loss = torch.nn.BCELoss()
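Two notes on the new wrapper test: the pipeline ends in CuPy arrays on the GPU, so the final comparison calls .get() to copy both outputs back to host NumPy before np.testing.assert_equal. And the decorated ranges only become visible when the run is captured by a profiler; an illustrative Nsight Systems invocation (flags and report name are examples, not part of this PR):

    nsys profile -t nvtx,cuda -o nvtx_report python -m pytest tests/test_nvtx_decorator.py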