From 44db71c0772e5ef5758c38d0e4e8ad9995946c80 Mon Sep 17 00:00:00 2001
From: Justin Davis
Date: Tue, 25 Nov 2025 09:14:49 -0800
Subject: [PATCH 1/5] implement additional cvcuda infra for all branches to avoid duplicate setup

---
 torchvision/transforms/v2/_transform.py       |  4 ++--
 torchvision/transforms/v2/_utils.py           |  3 ++-
 .../transforms/v2/functional/__init__.py      |  2 +-
 .../transforms/v2/functional/_augment.py      | 11 ++++++++++-
 .../transforms/v2/functional/_color.py        | 12 +++++++++++-
 .../transforms/v2/functional/_geometry.py     | 19 +++++++++++++++++--
 torchvision/transforms/v2/functional/_misc.py | 11 +++++++++--
 .../transforms/v2/functional/_utils.py        | 16 ++++++++++++++++
 8 files changed, 68 insertions(+), 10 deletions(-)

diff --git a/torchvision/transforms/v2/_transform.py b/torchvision/transforms/v2/_transform.py
index ac84fcb6c82..bec9ffcf714 100644
--- a/torchvision/transforms/v2/_transform.py
+++ b/torchvision/transforms/v2/_transform.py
@@ -11,7 +11,7 @@
 from torchvision.transforms.v2._utils import check_type, has_any, is_pure_tensor
 from torchvision.utils import _log_api_usage_once
 
-from .functional._utils import _get_kernel
+from .functional._utils import _get_kernel, is_cvcuda_tensor
 
 
 class Transform(nn.Module):
@@ -23,7 +23,7 @@ class Transform(nn.Module):
 
     # Class attribute defining transformed types. Other types are passed-through without any transformation
     # We support both Types and callables that are able to do further checks on the type of the input.
-    _transformed_types: tuple[type | Callable[[Any], bool], ...] = (torch.Tensor, PIL.Image.Image)
+    _transformed_types: tuple[type | Callable[[Any], bool], ...] = (torch.Tensor, PIL.Image.Image, is_cvcuda_tensor)
 
     def __init__(self) -> None:
         super().__init__()
diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py
index bb6051b4e61..765a772fe41 100644
--- a/torchvision/transforms/v2/_utils.py
+++ b/torchvision/transforms/v2/_utils.py
@@ -15,7 +15,7 @@
 from torchvision._utils import sequence_to_str
 from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size  # noqa: F401
 
-from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
+from torchvision.transforms.v2.functional import get_dimensions, get_size, is_cvcuda_tensor, is_pure_tensor
 from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
 
 
@@ -207,6 +207,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]:
                 tv_tensors.Mask,
                 tv_tensors.BoundingBoxes,
                 tv_tensors.KeyPoints,
+                is_cvcuda_tensor,
             ),
         )
     }
diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py
index 032a993b1f0..52181e4624b 100644
--- a/torchvision/transforms/v2/functional/__init__.py
+++ b/torchvision/transforms/v2/functional/__init__.py
@@ -1,6 +1,6 @@
 from torchvision.transforms import InterpolationMode  # usort: skip
 
-from ._utils import is_pure_tensor, register_kernel  # usort: skip
+from ._utils import is_pure_tensor, register_kernel, is_cvcuda_tensor  # usort: skip
 
 from ._meta import (
     clamp_bounding_boxes,
diff --git a/torchvision/transforms/v2/functional/_augment.py b/torchvision/transforms/v2/functional/_augment.py
index a904d8d7cbd..7ce5bdc7b7e 100644
--- a/torchvision/transforms/v2/functional/_augment.py
+++ b/torchvision/transforms/v2/functional/_augment.py
@@ -1,4 +1,5 @@
 import io
+from typing import TYPE_CHECKING
 
 import PIL.Image
 
@@ -8,7 +9,15 @@
 from torchvision.transforms.functional import pil_to_tensor, to_pil_image
 from torchvision.utils import _log_api_usage_once
 
-from ._utils import _get_kernel, _register_kernel_internal
+from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal
+
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811
 
 
 def erase(
diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py
index be254c0d63a..5be9c62902a 100644
--- a/torchvision/transforms/v2/functional/_color.py
+++ b/torchvision/transforms/v2/functional/_color.py
@@ -1,3 +1,5 @@
+from typing import TYPE_CHECKING
+
 import PIL.Image
 import torch
 from torch.nn.functional import conv2d
@@ -9,7 +11,15 @@
 from ._misc import _num_value_bits, to_dtype_image
 from ._type_conversion import pil_to_tensor, to_pil_image
 
-from ._utils import _get_kernel, _register_kernel_internal
+from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal
+
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811
 
 
 def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor:
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py
index 4fcb7fabe0d..c029488001c 100644
--- a/torchvision/transforms/v2/functional/_geometry.py
+++ b/torchvision/transforms/v2/functional/_geometry.py
@@ -2,7 +2,7 @@
 import numbers
 import warnings
 from collections.abc import Sequence
-from typing import Any, Optional, Union
+from typing import Any, Optional, TYPE_CHECKING, Union
 
 import PIL.Image
 import torch
@@ -26,7 +26,22 @@
 
 from ._meta import _get_size_image_pil, clamp_bounding_boxes, convert_bounding_box_format
 
-from ._utils import _FillTypeJIT, _get_kernel, _register_five_ten_crop_kernel_internal, _register_kernel_internal
+from ._utils import (
+    _FillTypeJIT,
+    _get_kernel,
+    _import_cvcuda,
+    _is_cvcuda_available,
+    _register_five_ten_crop_kernel_internal,
+    _register_kernel_internal,
+)
+
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811
 
 
 def _check_interpolation(interpolation: Union[InterpolationMode, int]) -> InterpolationMode:
diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py
index daf263df046..0fa05a2113c 100644
--- a/torchvision/transforms/v2/functional/_misc.py
+++ b/torchvision/transforms/v2/functional/_misc.py
@@ -1,5 +1,5 @@
 import math
-from typing import Optional
+from typing import Optional, TYPE_CHECKING
 
 import PIL.Image
 import torch
@@ -13,7 +13,14 @@
 
 from ._meta import _convert_bounding_box_format
 
-from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor
+from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal, is_pure_tensor
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811
 
 
 def normalize(
diff --git a/torchvision/transforms/v2/functional/_utils.py b/torchvision/transforms/v2/functional/_utils.py
index ad1eddd258b..73fafaf7425 100644
--- a/torchvision/transforms/v2/functional/_utils.py
+++ b/torchvision/transforms/v2/functional/_utils.py
@@ -169,3 +169,19 @@ def _is_cvcuda_available():
         return True
     except ImportError:
         return False
+
+
+def is_cvcuda_tensor(inpt: Any) -> bool:
+    """
+    Check if the input is a CV-CUDA tensor.
+
+    Args:
+        inpt: The input to check.
+
+    Returns:
+        True if the input is a CV-CUDA tensor, False otherwise.
+    """
+    if _is_cvcuda_available():
+        cvcuda = _import_cvcuda()
+        return isinstance(inpt, cvcuda.Tensor)
+    return False

From e3dd70022fa1c87aca7a9a98068b6e13e802a375 Mon Sep 17 00:00:00 2001
From: Justin Davis
Date: Tue, 25 Nov 2025 09:26:19 -0800
Subject: [PATCH 2/5] update make_image_cvcuda to have default batch dim

---
 test/common_utils.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/common_utils.py b/test/common_utils.py
index 8c3c9dd58a8..e7bae60c41b 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -400,8 +400,9 @@ def make_image_pil(*args, **kwargs):
     return to_pil_image(make_image(*args, **kwargs))
 
 
-def make_image_cvcuda(*args, **kwargs):
-    return to_cvcuda_tensor(make_image(*args, **kwargs))
+def make_image_cvcuda(*args, batch_dims=(1,), **kwargs):
+    # explicitly default batch_dims to (1,) since to_cvcuda_tensor requires a batch dimension (ndims == 4)
+    return to_cvcuda_tensor(make_image(*args, batch_dims=batch_dims, **kwargs))
 
 
 def make_keypoints(canvas_size=DEFAULT_SIZE, *, num_points=4, dtype=None, device="cpu"):

From 80dc7ddb47a10d5598dd4f6b221f6bcfb95e63e2 Mon Sep 17 00:00:00 2001
From: Justin Davis
Date: Tue, 25 Nov 2025 11:27:23 -0800
Subject: [PATCH 3/5] add CV-CUDA kernels for rgb_to_grayscale and grayscale_to_rgb

---
 test/test_transforms_v2.py                    | 109 ++++++++++++++++--
 torchvision/transforms/v2/_utils.py           |   2 +-
 .../transforms/v2/functional/_color.py        |  57 +++++++++
 torchvision/transforms/v2/functional/_meta.py |  24 +++-
 4 files changed, 181 insertions(+), 11 deletions(-)

diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index 670a9d00ffb..aad1293446e 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -6357,7 +6357,17 @@ class TestRgbToGrayscale:
     def test_kernel_image(self, dtype, device):
         check_kernel(F.rgb_to_grayscale_image, make_image(dtype=dtype, device=device))
 
-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image_pil, make_image])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     def test_functional(self, make_input):
         check_functional(F.rgb_to_grayscale, make_input())
 
@@ -6367,23 +6377,62 @@ def test_functional(self, make_input):
             (F.rgb_to_grayscale_image, torch.Tensor),
             (F._color._rgb_to_grayscale_image_pil, PIL.Image.Image),
             (F.rgb_to_grayscale_image, tv_tensors.Image),
+            pytest.param(
+                F._color._rgb_to_grayscale_cvcuda,
+                "cvcuda.Tensor",
+                marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available"),
+            ),
         ],
     )
     def test_functional_signature(self, kernel, input_type):
+        if input_type == "cvcuda.Tensor":
+            input_type = _import_cvcuda().Tensor
         check_functional_kernel_signature_match(F.rgb_to_grayscale, kernel=kernel, input_type=input_type)
 
     @pytest.mark.parametrize("transform", [transforms.Grayscale(), transforms.RandomGrayscale(p=1)])
-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image_pil, make_image])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     def test_transform(self, transform, make_input):
+        if make_input is make_image_cvcuda and isinstance(transform, transforms.RandomGrayscale):
+            pytest.skip("RandomGrayscale keeps num_output_channels == 3, which the CV-CUDA kernel does not support")
         check_transform(transform, make_input())
 
     @pytest.mark.parametrize("num_output_channels", [1, 3])
    @pytest.mark.parametrize("color_space", ["RGB", "GRAY"])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     @pytest.mark.parametrize("fn", [F.rgb_to_grayscale, transform_cls_to_functional(transforms.Grayscale)])
-    def test_image_correctness(self, num_output_channels, color_space, fn):
-        image = make_image(dtype=torch.uint8, device="cpu", color_space=color_space)
+    def test_image_correctness(self, num_output_channels, color_space, make_input, fn):
+        if make_input is make_image_cvcuda and num_output_channels == 3:
+            pytest.skip("CV-CUDA does not support num_output_channels == 3")
+
+        image = make_input(dtype=torch.uint8, device="cpu", color_space=color_space)
 
         actual = fn(image, num_output_channels=num_output_channels)
+
+        if make_input is make_image_cvcuda:
+            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+            actual = actual.squeeze(0)
+            # drop the batch dimension
+            image = F.cvcuda_to_tensor(image).to(device="cpu")
+            image = image.squeeze(0)
+
         expected = F.to_image(F.rgb_to_grayscale(F.to_pil_image(image), num_output_channels=num_output_channels))
 
         assert_equal(actual, expected, rtol=0, atol=1)
@@ -6421,7 +6470,17 @@ class TestGrayscaleToRgb:
     def test_kernel_image(self, dtype, device):
         check_kernel(F.grayscale_to_rgb_image, make_image(dtype=dtype, device=device))
 
-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image_pil, make_image])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     def test_functional(self, make_input):
         check_functional(F.grayscale_to_rgb, make_input())
 
@@ -6431,20 +6490,54 @@ def test_functional(self, make_input):
             (F.rgb_to_grayscale_image, torch.Tensor),
             (F._color._rgb_to_grayscale_image_pil, PIL.Image.Image),
             (F.rgb_to_grayscale_image, tv_tensors.Image),
+            pytest.param(
+                F._color._grayscale_to_rgb_cvcuda,
+                "cvcuda.Tensor",
+                marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available"),
+            ),
         ],
     )
     def test_functional_signature(self, kernel, input_type):
+        if input_type == "cvcuda.Tensor":
+            input_type = _import_cvcuda().Tensor
         check_functional_kernel_signature_match(F.grayscale_to_rgb, kernel=kernel, input_type=input_type)
 
-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image_pil, make_image])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     def test_transform(self, make_input):
         check_transform(transforms.RGB(), make_input(color_space="GRAY"))
 
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
     @pytest.mark.parametrize("fn", [F.grayscale_to_rgb, transform_cls_to_functional(transforms.RGB)])
-    def test_image_correctness(self, fn):
-        image = make_image(dtype=torch.uint8, device="cpu", color_space="GRAY")
+    def test_image_correctness(self, make_input, fn):
+        image = make_input(dtype=torch.uint8, device="cpu", color_space="GRAY")
 
         actual = fn(image)
+
+        if make_input is make_image_cvcuda:
+            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+            actual = actual.squeeze(0)
+            # drop the batch dimension
+            image = F.cvcuda_to_tensor(image).to(device="cpu")
+            image = image.squeeze(0)
+
         expected = F.to_image(F.grayscale_to_rgb(F.to_pil_image(image)))
 
         assert_equal(actual, expected, rtol=0, atol=1)
diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py
index 765a772fe41..3fc33ce5964 100644
--- a/torchvision/transforms/v2/_utils.py
+++ b/torchvision/transforms/v2/_utils.py
@@ -182,7 +182,7 @@ def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]:
     chws = {
         tuple(get_dimensions(inpt))
         for inpt in flat_inputs
-        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video))
+        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, is_cvcuda_tensor))
     }
     if not chws:
         raise TypeError("No image or video was found in the sample")
diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py
index 5be9c62902a..54b71a366e3 100644
--- a/torchvision/transforms/v2/functional/_color.py
+++ b/torchvision/transforms/v2/functional/_color.py
@@ -73,6 +73,38 @@ def _rgb_to_grayscale_image_pil(image: PIL.Image.Image, num_output_channels: int
     return _FP.to_grayscale(image, num_output_channels=num_output_channels)
 
 
+def _rgb_to_grayscale_cvcuda(
+    image: "cvcuda.Tensor",
+    num_output_channels: int = 1,
+) -> "cvcuda.Tensor":
+    cvcuda = _import_cvcuda()
+
+    if num_output_channels not in (1, 3):
+        raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.")
+
+    if num_output_channels == 3:
+        raise ValueError("num_output_channels must be 1 for CV-CUDA, got 3.")
+
+    if image.shape[3] == 1:
+        # if we already have a single channel, just clone the tensor
+        # use copymakeborder with zero borders since CV-CUDA has no native clone
+        return cvcuda.copymakeborder(
+            image,
+            border_mode=cvcuda.Border.CONSTANT,
+            border_value=[0],
+            top=0,
+            left=0,
+            bottom=0,
+            right=0,
+        )
+
+    return cvcuda.cvtcolor(image, cvcuda.ColorConversion.RGB2GRAY)
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(rgb_to_grayscale, _import_cvcuda().Tensor)(_rgb_to_grayscale_cvcuda)
+
+
 def grayscale_to_rgb(inpt: torch.Tensor) -> torch.Tensor:
     """See :class:`~torchvision.transforms.v2.RGB` for details."""
     if torch.jit.is_scripting():
@@ -99,6 +131,31 @@ def grayscale_to_rgb_image_pil(image: PIL.Image.Image) -> PIL.Image.Image:
     return image.convert(mode="RGB")
 
 
+def _grayscale_to_rgb_cvcuda(
+    image: "cvcuda.Tensor",
+) -> "cvcuda.Tensor":
+    cvcuda = _import_cvcuda()
+
+    if image.shape[3] == 3:
+        # if we already have RGB channels, just clone the tensor
+        # use copymakeborder with zero borders since CV-CUDA has no native clone
+        return cvcuda.copymakeborder(
+            image,
+            border_mode=cvcuda.Border.CONSTANT,
+            border_value=[0],
+            top=0,
+            left=0,
+            bottom=0,
+            right=0,
+        )
+
+    return cvcuda.cvtcolor(image, cvcuda.ColorConversion.GRAY2RGB)
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(grayscale_to_rgb, _import_cvcuda().Tensor)(_grayscale_to_rgb_cvcuda)
+
+
 def _blend(image1: torch.Tensor, image2: torch.Tensor, ratio: float) -> torch.Tensor:
     ratio = float(ratio)
     fp = image1.is_floating_point()
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 6b8f19f12f4..1bccfe0fc7f 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -51,6 +51,16 @@ def get_dimensions_video(video: torch.Tensor) -> list[int]:
     return get_dimensions_image(video)
 
 
+def _get_dimensions_cvcuda(image: "cvcuda.Tensor") -> list[int]:
+    # CV-CUDA tensor is always in NHWC layout
+    # get_dimensions returns [C, H, W]
+    return [image.shape[3], image.shape[1], image.shape[2]]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_dimensions, cvcuda.Tensor)(_get_dimensions_cvcuda)
+
+
 def get_num_channels(inpt: torch.Tensor) -> int:
     if torch.jit.is_scripting():
         return get_num_channels_image(inpt)
@@ -87,6 +97,16 @@ def get_num_channels_video(video: torch.Tensor) -> int:
 get_image_num_channels = get_num_channels
 
 
+def _get_num_channels_cvcuda(image: "cvcuda.Tensor") -> int:
+    # CV-CUDA tensor is always in NHWC layout
+    # get_num_channels returns C, the last dimension
+    return image.shape[3]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_num_channels, cvcuda.Tensor)(_get_num_channels_cvcuda)
+
+
 def get_size(inpt: torch.Tensor) -> list[int]:
     if torch.jit.is_scripting():
         return get_size_image(inpt)
@@ -114,7 +134,7 @@ def _get_size_image_pil(image: PIL.Image.Image) -> list[int]:
     return [height, width]
 
 
-def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:
+def _get_size_cvcuda(image: "cvcuda.Tensor") -> list[int]:
     """Get size of `cvcuda.Tensor` with NHWC layout."""
     hw = list(image.shape[-3:-1])
     ndims = len(hw)
@@ -125,7 +145,7 @@ def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:
 
 
 if CVCUDA_AVAILABLE:
-    _get_size_image_cvcuda = _register_kernel_internal(get_size, cvcuda.Tensor)(get_size_image_cvcuda)
+    _register_kernel_internal(get_size, cvcuda.Tensor)(_get_size_cvcuda)
 
 
 @_register_kernel_internal(get_size, tv_tensors.Video, tv_tensor_wrapper=False)

From d80fc3b9d5e829eb487c7b7755dd213adb21f3e1 Mon Sep 17 00:00:00 2001
From: Justin Davis
Date: Wed, 26 Nov 2025 16:07:33 -0800
Subject: [PATCH 4/5] simplify PIL comparisons

---
 test/common_utils.py       | 25 ++++++++++++++++++++++++-
 test/test_transforms_v2.py | 13 +++----------
 2 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/test/common_utils.py b/test/common_utils.py
index e7bae60c41b..93f285e5905 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -20,13 +20,15 @@
 from torch.testing._comparison import BooleanPair, NonePair, not_close_error_metas, NumberPair, TensorLikePair
 from torchvision import io, tv_tensors
 from torchvision.transforms._functional_tensor import _max_value as get_max_value
-from torchvision.transforms.v2.functional import to_cvcuda_tensor, to_image, to_pil_image
+from torchvision.transforms.v2.functional import cvcuda_to_tensor, to_cvcuda_tensor, to_image, to_pil_image
+from torchvision.transforms.v2.functional._utils import _import_cvcuda, _is_cvcuda_available
 from torchvision.utils import _Image_fromarray
 
 IN_OSS_CI = any(os.getenv(var) == "true" for var in ["CIRCLECI", "GITHUB_ACTIONS"])
 IN_RE_WORKER = os.environ.get("INSIDE_RE_WORKER") is not None
 IN_FBCODE = os.environ.get("IN_FBCODE_TORCHVISION") == "1"
+CVCUDA_AVAILABLE = _is_cvcuda_available()
 CUDA_NOT_AVAILABLE_MSG = "CUDA device not available"
 MPS_NOT_AVAILABLE_MSG = "MPS device not available"
 OSS_CI_GPU_NO_CUDA_MSG = "We're in an OSS GPU machine, and this test doesn't need cuda."
@@ -275,6 +277,17 @@ def combinations_grid(**kwargs):
     return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())]
 
 
+def cvcuda_to_pil_compatible_tensor(tensor):
+    tensor = cvcuda_to_tensor(tensor)
+    if tensor.ndim != 4:
+        raise ValueError(f"CV-CUDA Tensor should be 4 dimensional. Got {tensor.ndim} dimensions.")
+    if tensor.shape[0] != 1:
+        raise ValueError(
+            f"CV-CUDA Tensor should have batch dimension 1 for comparison with PIL.Image.Image. Got {tensor.shape[0]}."
+        )
+    return tensor.squeeze(0).cpu()
+
+
 class ImagePair(TensorLikePair):
     def __init__(
         self,
@@ -287,6 +300,16 @@ def __init__(
         if all(isinstance(input, PIL.Image.Image) for input in [actual, expected]):
             actual, expected = (to_image(input) for input in [actual, expected])
 
+        if CVCUDA_AVAILABLE and all(isinstance(input, _import_cvcuda().Tensor) for input in [actual, expected]):
+            actual, expected = (cvcuda_to_tensor(input) for input in [actual, expected])
+
+        if CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor) and isinstance(expected, PIL.Image.Image):
+            actual = cvcuda_to_pil_compatible_tensor(actual)
+            expected = to_image(expected)
+
+        if CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor):
+            actual = cvcuda_to_pil_compatible_tensor(actual)
+
         super().__init__(actual, expected, **other_parameters)
 
         self.mae = mae
diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index aad1293446e..08b1a0eb3f2 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -24,6 +24,7 @@
     assert_equal,
     cache,
     cpu_and_cuda,
+    cvcuda_to_pil_compatible_tensor,
     freeze_rng_state,
     ignore_jit_no_profile_information_warning,
     make_bounding_boxes,
@@ -6427,11 +6428,7 @@ def test_image_correctness(self, num_output_channels, color_space, make_input, f
         actual = fn(image, num_output_channels=num_output_channels)
 
         if make_input is make_image_cvcuda:
-            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
-            actual = actual.squeeze(0)
-            # drop the batch dimension
-            image = F.cvcuda_to_tensor(image).to(device="cpu")
-            image = image.squeeze(0)
+            image = cvcuda_to_pil_compatible_tensor(image)
 
         expected = F.to_image(F.rgb_to_grayscale(F.to_pil_image(image), num_output_channels=num_output_channels))
 
@@ -6532,11 +6529,7 @@ def test_image_correctness(self, make_input, fn):
         actual = fn(image)
 
         if make_input is make_image_cvcuda:
-            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
-            actual = actual.squeeze(0)
-            # drop the batch dimension
-            image = F.cvcuda_to_tensor(image).to(device="cpu")
-            image = image.squeeze(0)
+            image = cvcuda_to_pil_compatible_tensor(image)
 
         expected = F.to_image(F.grayscale_to_rgb(F.to_pil_image(image)))
 

From 78e892f637d1fa38ce07bbc0e181c72a6846dd5f Mon Sep 17 00:00:00 2001
From: Justin Davis
Date: Wed, 26 Nov 2025 16:12:56 -0800
Subject: [PATCH 5/5] use elif instead of separate ifs

---
 test/common_utils.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/test/common_utils.py b/test/common_utils.py
index 93f285e5905..49f8c5044e4 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -299,15 +299,12 @@ def __init__(
     ):
         if all(isinstance(input, PIL.Image.Image) for input in [actual, expected]):
             actual, expected = (to_image(input) for input in [actual, expected])
-
-        if CVCUDA_AVAILABLE and all(isinstance(input, _import_cvcuda().Tensor) for input in [actual, expected]):
+        elif CVCUDA_AVAILABLE and all(isinstance(input, _import_cvcuda().Tensor) for input in [actual, expected]):
             actual, expected = (cvcuda_to_tensor(input) for input in [actual, expected])
-
-        if CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor) and isinstance(expected, PIL.Image.Image):
+        elif CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor) and isinstance(expected, PIL.Image.Image):
             actual = cvcuda_to_pil_compatible_tensor(actual)
             expected = to_image(expected)
-
-        if CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor):
+        elif CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor):
             actual = cvcuda_to_pil_compatible_tensor(actual)
 
         super().__init__(actual, expected, **other_parameters)
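
Usage sketch (not part of the patches themselves): a minimal example of how the pieces above compose once the series is applied. It assumes the cvcuda package and a CUDA device are available; everything referenced comes from these diffs (to_cvcuda_tensor / cvcuda_to_tensor plus the kernels registered for cvcuda.Tensor), and the NCHW shape of cvcuda_to_tensor's output is inferred from the test code above rather than stated in the diffs.

    import torch
    import torchvision.transforms.v2.functional as F

    # to_cvcuda_tensor requires a batch dimension (ndims == 4), see PATCH 2/5
    img = torch.randint(0, 256, (1, 3, 32, 32), dtype=torch.uint8, device="cuda")

    cv_img = F.to_cvcuda_tensor(img)  # cvcuda.Tensor in NHWC layout

    # the query kernels registered in _meta.py dispatch on the cvcuda.Tensor type
    assert F.get_size(cv_img) == [32, 32]
    assert F.get_num_channels(cv_img) == 3

    # rgb_to_grayscale dispatches to _rgb_to_grayscale_cvcuda; only
    # num_output_channels == 1 is supported for CV-CUDA tensors
    gray = F.rgb_to_grayscale(cv_img, num_output_channels=1)

    # back to a torch.Tensor on the GPU; per the tests this is NCHW, here (1, 1, 32, 32)
    back = F.cvcuda_to_tensor(gray)

This is also what adding is_cvcuda_tensor to _transformed_types (PATCH 1/5) buys at the Transform level: a transforms.Grayscale() instance applied to a cvcuda.Tensor input is routed through the registered CV-CUDA kernel instead of being passed through untransformed.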