From 44db71c0772e5ef5758c38d0e4e8ad9995946c80 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Tue, 25 Nov 2025 09:14:49 -0800 Subject: [PATCH 01/18] implement additional cvcuda infra for all branches to avoid duplicate setup --- torchvision/transforms/v2/_transform.py | 4 ++-- torchvision/transforms/v2/_utils.py | 3 ++- .../transforms/v2/functional/__init__.py | 2 +- .../transforms/v2/functional/_augment.py | 11 ++++++++++- .../transforms/v2/functional/_color.py | 12 +++++++++++- .../transforms/v2/functional/_geometry.py | 19 +++++++++++++++++-- torchvision/transforms/v2/functional/_misc.py | 11 +++++++++-- .../transforms/v2/functional/_utils.py | 16 ++++++++++++++++ 8 files changed, 68 insertions(+), 10 deletions(-) diff --git a/torchvision/transforms/v2/_transform.py b/torchvision/transforms/v2/_transform.py index ac84fcb6c82..bec9ffcf714 100644 --- a/torchvision/transforms/v2/_transform.py +++ b/torchvision/transforms/v2/_transform.py @@ -11,7 +11,7 @@ from torchvision.transforms.v2._utils import check_type, has_any, is_pure_tensor from torchvision.utils import _log_api_usage_once -from .functional._utils import _get_kernel +from .functional._utils import _get_kernel, is_cvcuda_tensor class Transform(nn.Module): @@ -23,7 +23,7 @@ class Transform(nn.Module): # Class attribute defining transformed types. Other types are passed-through without any transformation # We support both Types and callables that are able to do further checks on the type of the input. - _transformed_types: tuple[type | Callable[[Any], bool], ...] = (torch.Tensor, PIL.Image.Image) + _transformed_types: tuple[type | Callable[[Any], bool], ...] 
= (torch.Tensor, PIL.Image.Image, is_cvcuda_tensor) def __init__(self) -> None: super().__init__() diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index bb6051b4e61..765a772fe41 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -15,7 +15,7 @@ from torchvision._utils import sequence_to_str from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401 -from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor +from torchvision.transforms.v2.functional import get_dimensions, get_size, is_cvcuda_tensor, is_pure_tensor from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT @@ -207,6 +207,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]: tv_tensors.Mask, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, + is_cvcuda_tensor, ), ) } diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index 032a993b1f0..52181e4624b 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -1,6 +1,6 @@ from torchvision.transforms import InterpolationMode # usort: skip -from ._utils import is_pure_tensor, register_kernel # usort: skip +from ._utils import is_pure_tensor, register_kernel, is_cvcuda_tensor # usort: skip from ._meta import ( clamp_bounding_boxes, diff --git a/torchvision/transforms/v2/functional/_augment.py b/torchvision/transforms/v2/functional/_augment.py index a904d8d7cbd..7ce5bdc7b7e 100644 --- a/torchvision/transforms/v2/functional/_augment.py +++ b/torchvision/transforms/v2/functional/_augment.py @@ -1,4 +1,5 @@ import io +from typing import TYPE_CHECKING import PIL.Image @@ -8,7 +9,15 @@ from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once -from ._utils import _get_kernel, 
_register_kernel_internal +from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal + + +CVCUDA_AVAILABLE = _is_cvcuda_available() + +if TYPE_CHECKING: + import cvcuda # type: ignore[import-not-found] +if CVCUDA_AVAILABLE: + cvcuda = _import_cvcuda() # noqa: F811 def erase( diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py index be254c0d63a..5be9c62902a 100644 --- a/torchvision/transforms/v2/functional/_color.py +++ b/torchvision/transforms/v2/functional/_color.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + import PIL.Image import torch from torch.nn.functional import conv2d @@ -9,7 +11,15 @@ from ._misc import _num_value_bits, to_dtype_image from ._type_conversion import pil_to_tensor, to_pil_image -from ._utils import _get_kernel, _register_kernel_internal +from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal + + +CVCUDA_AVAILABLE = _is_cvcuda_available() + +if TYPE_CHECKING: + import cvcuda # type: ignore[import-not-found] +if CVCUDA_AVAILABLE: + cvcuda = _import_cvcuda() # noqa: F811 def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor: diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 4fcb7fabe0d..c029488001c 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2,7 +2,7 @@ import numbers import warnings from collections.abc import Sequence -from typing import Any, Optional, Union +from typing import Any, Optional, TYPE_CHECKING, Union import PIL.Image import torch @@ -26,7 +26,22 @@ from ._meta import _get_size_image_pil, clamp_bounding_boxes, convert_bounding_box_format -from ._utils import _FillTypeJIT, _get_kernel, _register_five_ten_crop_kernel_internal, _register_kernel_internal +from ._utils import ( + _FillTypeJIT, + _get_kernel, + 
_import_cvcuda, + _is_cvcuda_available, + _register_five_ten_crop_kernel_internal, + _register_kernel_internal, +) + + +CVCUDA_AVAILABLE = _is_cvcuda_available() + +if TYPE_CHECKING: + import cvcuda # type: ignore[import-not-found] +if CVCUDA_AVAILABLE: + cvcuda = _import_cvcuda() # noqa: F811 def _check_interpolation(interpolation: Union[InterpolationMode, int]) -> InterpolationMode: diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index daf263df046..0fa05a2113c 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -1,5 +1,5 @@ import math -from typing import Optional +from typing import Optional, TYPE_CHECKING import PIL.Image import torch @@ -13,7 +13,14 @@ from ._meta import _convert_bounding_box_format -from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor +from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal, is_pure_tensor + +CVCUDA_AVAILABLE = _is_cvcuda_available() + +if TYPE_CHECKING: + import cvcuda # type: ignore[import-not-found] +if CVCUDA_AVAILABLE: + cvcuda = _import_cvcuda() # noqa: F811 def normalize( diff --git a/torchvision/transforms/v2/functional/_utils.py b/torchvision/transforms/v2/functional/_utils.py index ad1eddd258b..73fafaf7425 100644 --- a/torchvision/transforms/v2/functional/_utils.py +++ b/torchvision/transforms/v2/functional/_utils.py @@ -169,3 +169,19 @@ def _is_cvcuda_available(): return True except ImportError: return False + + +def is_cvcuda_tensor(inpt: Any) -> bool: + """ + Check if the input is a CVCUDA tensor. + + Args: + inpt: The input to check. + + Returns: + True if the input is a CV-CUDA tensor, False otherwise. 
+ """ + if _is_cvcuda_available(): + cvcuda = _import_cvcuda() + return isinstance(inpt, cvcuda.Tensor) + return False From e3dd70022fa1c87aca7a9a98068b6e13e802a375 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Tue, 25 Nov 2025 09:26:19 -0800 Subject: [PATCH 02/18] update make_image_cvcuda to have default batch dim --- test/common_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 8c3c9dd58a8..e7bae60c41b 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -400,8 +400,9 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) -def make_image_cvcuda(*args, **kwargs): - return to_cvcuda_tensor(make_image(*args, **kwargs)) +def make_image_cvcuda(*args, batch_dims=(1,), **kwargs): + # explicitly default batch_dims to (1,) since to_cvcuda_tensor requires a batch dimension (ndims == 4) + return to_cvcuda_tensor(make_image(*args, batch_dims=batch_dims, **kwargs)) def make_keypoints(canvas_size=DEFAULT_SIZE, *, num_points=4, dtype=None, device="cpu"): From c035df1c6eaebcad25604f8c298a7d9eaf86864b Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 1 Dec 2025 18:16:27 -0800 Subject: [PATCH 03/18] add stanardized setup to main for easier updating of PRs and branches --- test/common_utils.py | 21 ++++++++++++++-- test/test_transforms_v2.py | 2 +- torchvision/transforms/v2/_utils.py | 2 +- torchvision/transforms/v2/functional/_meta.py | 24 +++++++++++++++++-- 4 files changed, 43 insertions(+), 6 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index e7bae60c41b..3b889e93d2e 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -20,13 +20,15 @@ from torch.testing._comparison import BooleanPair, NonePair, not_close_error_metas, NumberPair, TensorLikePair from torchvision import io, tv_tensors from torchvision.transforms._functional_tensor import _max_value as get_max_value -from torchvision.transforms.v2.functional 
import to_cvcuda_tensor, to_image, to_pil_image +from torchvision.transforms.v2.functional import cvcuda_to_tensor, to_cvcuda_tensor, to_image, to_pil_image +from torchvision.transforms.v2.functional._utils import _import_cvcuda, _is_cvcuda_available from torchvision.utils import _Image_fromarray IN_OSS_CI = any(os.getenv(var) == "true" for var in ["CIRCLECI", "GITHUB_ACTIONS"]) IN_RE_WORKER = os.environ.get("INSIDE_RE_WORKER") is not None IN_FBCODE = os.environ.get("IN_FBCODE_TORCHVISION") == "1" +CVCUDA_AVAILABLE = _is_cvcuda_available() CUDA_NOT_AVAILABLE_MSG = "CUDA device not available" MPS_NOT_AVAILABLE_MSG = "MPS device not available" OSS_CI_GPU_NO_CUDA_MSG = "We're in an OSS GPU machine, and this test doesn't need cuda." @@ -275,6 +277,17 @@ def combinations_grid(**kwargs): return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())] +def cvcuda_to_pil_compatible_tensor(tensor: "cvcuda.Tensor") -> torch.Tensor: + tensor = cvcuda_to_tensor(tensor) + if tensor.ndim != 4: + raise ValueError(f"CV-CUDA Tensor should be 4 dimensional. Got {tensor.ndim} dimensions.") + if tensor.shape[0] != 1: + raise ValueError( + f"CV-CUDA Tensor should have batch dimension 1 for comparison with PIL.Image.Image. Got {tensor.shape[0]}." 
+ ) + return tensor.squeeze(0).cpu() + + class ImagePair(TensorLikePair): def __init__( self, @@ -287,6 +300,11 @@ def __init__( if all(isinstance(input, PIL.Image.Image) for input in [actual, expected]): actual, expected = (to_image(input) for input in [actual, expected]) + # handle check for CV-CUDA Tensors + if CVCUDA_AVAILABLE and isinstance(actual, _import_cvcuda().Tensor): + # Use the PIL compatible tensor, so we can always compare with PIL.Image.Image + actual = cvcuda_to_pil_compatible_tensor(actual) + super().__init__(actual, expected, **other_parameters) self.mae = mae @@ -401,7 +419,6 @@ def make_image_pil(*args, **kwargs): def make_image_cvcuda(*args, batch_dims=(1,), **kwargs): - # explicitly default batch_dims to (1,) since to_cvcuda_tensor requires a batch dimension (ndims == 4) return to_cvcuda_tensor(make_image(*args, batch_dims=batch_dims, **kwargs)) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 670a9d00ffb..7eba65550da 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -21,6 +21,7 @@ import torchvision.transforms.v2 as transforms from common_utils import ( + assert_close, assert_equal, cache, cpu_and_cuda, @@ -41,7 +42,6 @@ ) from torch import nn -from torch.testing import assert_close from torch.utils._pytree import tree_flatten, tree_map from torch.utils.data import DataLoader, default_collate from torchvision import tv_tensors diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 765a772fe41..3fc33ce5964 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -182,7 +182,7 @@ def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]: chws = { tuple(get_dimensions(inpt)) for inpt in flat_inputs - if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video)) + if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, is_cvcuda_tensor)) } if not chws: 
raise TypeError("No image or video was found in the sample") diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 6b8f19f12f4..ee562cb2aee 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -51,6 +51,16 @@ def get_dimensions_video(video: torch.Tensor) -> list[int]: return get_dimensions_image(video) +def _get_dimensions_cvcuda(image: "cvcuda.Tensor") -> list[int]: + # CV-CUDA tensor is always in NHWC layout + # get_dimensions is CHW + return [image.shape[3], image.shape[1], image.shape[2]] + + +if CVCUDA_AVAILABLE: + _register_kernel_internal(get_dimensions, cvcuda.Tensor)(_get_dimensions_cvcuda) + + def get_num_channels(inpt: torch.Tensor) -> int: if torch.jit.is_scripting(): return get_num_channels_image(inpt) @@ -87,6 +97,16 @@ def get_num_channels_video(video: torch.Tensor) -> int: get_image_num_channels = get_num_channels +def _get_num_channels_cvcuda(image: "cvcuda.Tensor") -> int: + # CV-CUDA tensor is always in NHWC layout + # get_num_channels is C + return image.shape[3] + + +if CVCUDA_AVAILABLE: + _register_kernel_internal(get_num_channels, cvcuda.Tensor)(_get_num_channels_cvcuda) + + def get_size(inpt: torch.Tensor) -> list[int]: if torch.jit.is_scripting(): return get_size_image(inpt) @@ -114,7 +134,7 @@ def _get_size_image_pil(image: PIL.Image.Image) -> list[int]: return [height, width] -def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]: +def _get_size_cvcuda(image: "cvcuda.Tensor") -> list[int]: """Get size of `cvcuda.Tensor` with NHWC layout.""" hw = list(image.shape[-3:-1]) ndims = len(hw) @@ -125,7 +145,7 @@ def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]: if CVCUDA_AVAILABLE: - _get_size_image_cvcuda = _register_kernel_internal(get_size, cvcuda.Tensor)(get_size_image_cvcuda) + _register_kernel_internal(get_size, cvcuda.Tensor)(_get_size_cvcuda) @_register_kernel_internal(get_size, tv_tensors.Video, 
tv_tensor_wrapper=False) From 98d7dfb2059eaf2c10c3f549ea45f1d27875134c Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 1 Dec 2025 18:25:09 -0800 Subject: [PATCH 04/18] update is_cvcuda_tensor --- torchvision/transforms/v2/functional/_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/v2/functional/_utils.py b/torchvision/transforms/v2/functional/_utils.py index 73fafaf7425..44b2edeaf2d 100644 --- a/torchvision/transforms/v2/functional/_utils.py +++ b/torchvision/transforms/v2/functional/_utils.py @@ -181,7 +181,8 @@ def is_cvcuda_tensor(inpt: Any) -> bool: Returns: True if the input is a CV-CUDA tensor, False otherwise. """ - if _is_cvcuda_available(): + try: cvcuda = _import_cvcuda() return isinstance(inpt, cvcuda.Tensor) - return False + except ImportError: + return False From ddc116d13febdae1d53507bcde9f103a4c14eba7 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Tue, 2 Dec 2025 12:37:03 -0800 Subject: [PATCH 05/18] add cvcuda to pil compatible to transforms by default --- test/test_transforms_v2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 7eba65550da..87166477669 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -25,6 +25,7 @@ assert_equal, cache, cpu_and_cuda, + cvcuda_to_pil_compatible_tensor, freeze_rng_state, ignore_jit_no_profile_information_warning, make_bounding_boxes, From e51dc7eabd254261347245f4492892fd0944aae5 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Tue, 2 Dec 2025 12:46:23 -0800 Subject: [PATCH 06/18] remove cvcuda from transform class --- torchvision/transforms/v2/_transform.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/v2/_transform.py b/torchvision/transforms/v2/_transform.py index bec9ffcf714..ac84fcb6c82 100644 --- a/torchvision/transforms/v2/_transform.py +++ b/torchvision/transforms/v2/_transform.py @@ -11,7 +11,7 @@ from 
torchvision.transforms.v2._utils import check_type, has_any, is_pure_tensor from torchvision.utils import _log_api_usage_once -from .functional._utils import _get_kernel, is_cvcuda_tensor +from .functional._utils import _get_kernel class Transform(nn.Module): @@ -23,7 +23,7 @@ class Transform(nn.Module): # Class attribute defining transformed types. Other types are passed-through without any transformation # We support both Types and callables that are able to do further checks on the type of the input. - _transformed_types: tuple[type | Callable[[Any], bool], ...] = (torch.Tensor, PIL.Image.Image, is_cvcuda_tensor) + _transformed_types: tuple[type | Callable[[Any], bool], ...] = (torch.Tensor, PIL.Image.Image) def __init__(self) -> None: super().__init__() From 4939355a2c7421eeba95d7f155fe7953066aec6d Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Thu, 4 Dec 2025 11:07:08 -0800 Subject: [PATCH 07/18] resolve more formatting naming --- torchvision/transforms/v2/functional/__init__.py | 2 +- torchvision/transforms/v2/functional/_meta.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index 52181e4624b..032a993b1f0 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -1,6 +1,6 @@ from torchvision.transforms import InterpolationMode # usort: skip -from ._utils import is_pure_tensor, register_kernel, is_cvcuda_tensor # usort: skip +from ._utils import is_pure_tensor, register_kernel # usort: skip from ._meta import ( clamp_bounding_boxes, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index e8630f788ca..af03ad018d4 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -51,14 +51,14 @@ def get_dimensions_video(video: torch.Tensor) -> list[int]: return 
get_dimensions_image(video) -def _get_dimensions_cvcuda(image: "cvcuda.Tensor") -> list[int]: +def get_dimensions_image_cvcuda(image: "cvcuda.Tensor") -> list[int]: # CV-CUDA tensor is always in NHWC layout # get_dimensions is CHW return [image.shape[3], image.shape[1], image.shape[2]] if CVCUDA_AVAILABLE: - _register_kernel_internal(get_dimensions, cvcuda.Tensor)(_get_dimensions_cvcuda) + _register_kernel_internal(get_dimensions, cvcuda.Tensor)(get_dimensions_image_cvcuda) def get_num_channels(inpt: torch.Tensor) -> int: @@ -97,14 +97,14 @@ def get_num_channels_video(video: torch.Tensor) -> int: get_image_num_channels = get_num_channels -def _get_num_channels_cvcuda(image: "cvcuda.Tensor") -> int: +def get_num_channels_image_cvcuda(image: "cvcuda.Tensor") -> int: # CV-CUDA tensor is always in NHWC layout # get_num_channels is C return image.shape[3] if CVCUDA_AVAILABLE: - _register_kernel_internal(get_num_channels, cvcuda.Tensor)(_get_num_channels_cvcuda) + _register_kernel_internal(get_num_channels, cvcuda.Tensor)(get_num_channels_image_cvcuda) def get_size(inpt: torch.Tensor) -> list[int]: From ac82cea8f5bb038578b0f46d4a40c0b6bd772ded Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 17 Nov 2025 16:14:16 -0800 Subject: [PATCH 08/18] draft initial gaussian_blur cvcuda kernel implementation --- test/test_transforms_v2.py | 64 +++++++++++++++++++ .../transforms/v2/functional/__init__.py | 1 + torchvision/transforms/v2/functional/_misc.py | 40 ++++++++++-- 3 files changed, 99 insertions(+), 6 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index f767e211125..3e9cda02226 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4048,6 +4048,70 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp torch.testing.assert_close(actual, expected, rtol=0, atol=1) +@pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") +@needs_cuda +class TestGaussianBlurCVCUDA: + def 
test_kernel_image_errors(self): + image = make_image_cvcuda(batch_dims=(1,)) + + with pytest.raises(ValueError, match="kernel_size is a sequence its length should be 2"): + F.gaussian_blur_cvcuda(image, kernel_size=[1, 2, 3]) + + for kernel_size in [2, -1]: + with pytest.raises(ValueError, match="kernel_size should have odd and positive integers"): + F.gaussian_blur_cvcuda(image, kernel_size=kernel_size) + + with pytest.raises(ValueError, match="sigma is a sequence, its length should be 2"): + F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=[1, 2, 3]) + + with pytest.raises(TypeError, match="sigma should be either float or sequence of floats"): + F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=object()) + + with pytest.raises(ValueError, match="sigma should have positive values"): + F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=-1) + + def test_functional(self): + check_functional(F.gaussian_blur, make_image_cvcuda(batch_dims=(1,)), kernel_size=(3, 3)) + + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("sigma", [5, 2.0, (0.5, 2), [1.3, 2.7]]) + def test_transform(self, device, sigma): + check_transform( + transforms.GaussianBlur(kernel_size=3, sigma=sigma), make_image_cvcuda(batch_dims=(1,), device=device) + ) + + @pytest.mark.parametrize( + ("dimensions", "kernel_size", "sigma"), + [ + ((10, 12), (3, 3), 0.8), + ((10, 12), (3, 3), 0.5), + ((10, 12), (3, 5), 0.8), + ((10, 12), (3, 5), 0.5), + ((26, 28), (23, 23), 1.7), + ], + ) + @pytest.mark.parametrize("color_space", ["RGB", "GRAY"]) + @pytest.mark.parametrize("batch_dims", [(1,), (2,), (4,)]) + @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) + def test_functional_image_correctness(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): + height, width = dimensions + + image_tensor = make_image( + size=(height, width), color_space=color_space, batch_dims=batch_dims, dtype=dtype, device="cuda" + ) + image_cvcuda = 
F.to_cvcuda_tensor(image_tensor) + + expected = F.gaussian_blur_image(image_tensor, kernel_size=kernel_size, sigma=sigma) + actual = F.gaussian_blur_cvcuda(image_cvcuda, kernel_size=kernel_size, sigma=sigma) + actual_torch = F.cvcuda_to_tensor(actual) + + if dtype.is_floating_point: + torch.testing.assert_close(actual_torch, expected, rtol=0, atol=0.3) + else: + # uint8/16 gaussians can differ by up to max-value, most likely an overflow issue + torch.testing.assert_close(actual_torch, expected, rtol=0, atol=get_max_value(dtype)) + + class TestGaussianNoise: @pytest.mark.parametrize( "make_input", diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index 032a993b1f0..e948118a8bc 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -147,6 +147,7 @@ from ._misc import ( convert_image_dtype, gaussian_blur, + gaussian_blur_cvcuda, gaussian_blur_image, gaussian_blur_video, gaussian_noise, diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 0fa05a2113c..eb3773e47d6 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -1,5 +1,5 @@ import math -from typing import Optional, TYPE_CHECKING +from typing import Optional, Sequence, TYPE_CHECKING import PIL.Image import torch @@ -106,11 +106,10 @@ def _get_gaussian_kernel2d( return kernel2d -@_register_kernel_internal(gaussian_blur, torch.Tensor) -@_register_kernel_internal(gaussian_blur, tv_tensors.Image) -def gaussian_blur_image( - image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None -) -> torch.Tensor: +def _validate_kernel_size_and_sigma( + kernel_size: Sequence[int] | int, + sigma: Sequence[float | int] | float | int | None = None, +) -> tuple[list[int], list[float]]: # TODO: consider deprecating integers from sigma on the future if 
isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] @@ -139,6 +138,16 @@ def gaussian_blur_image( if s <= 0.0: raise ValueError(f"sigma should have positive values. Got {sigma}") + return kernel_size, sigma + + +@_register_kernel_internal(gaussian_blur, torch.Tensor) +@_register_kernel_internal(gaussian_blur, tv_tensors.Image) +def gaussian_blur_image( + image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None +) -> torch.Tensor: + kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma) + if image.numel() == 0: return image @@ -188,6 +197,25 @@ def gaussian_blur_video( return gaussian_blur_image(video, kernel_size, sigma) +def gaussian_blur_cvcuda( + image: "cvcuda.Tensor", + kernel_size: Sequence[int] | int, + sigma: Sequence[float | int] | float | int | None = None, +) -> "cvcuda.Tensor": + kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma) + + return cvcuda.gaussian( + image, + tuple(kernel_size), + tuple(sigma), + border=cvcuda.Border.REFLECT, + ) + + +if CVCUDA_AVAILABLE: + _gaussian_blur_cvcuda = _register_kernel_internal(gaussian_blur, cvcuda.Tensor)(gaussian_blur_cvcuda) + + def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor: """See :class:`~torchvision.transforms.v2.GaussianNoise`""" if torch.jit.is_scripting(): From b18fedf678c1975ecb39935b75297603514a1ed9 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Thu, 20 Nov 2025 09:00:21 -0800 Subject: [PATCH 09/18] fix: move cvcuda tests to centralized class, more guards againist no import, explicit imports in func --- test/test_transforms_v2.py | 66 ++++++++----------- .../transforms/v2/functional/__init__.py | 1 - torchvision/transforms/v2/functional/_misc.py | 42 ++++++++++-- 3 files changed, 64 insertions(+), 45 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 3e9cda02226..b08c2d3fcd5 100644 --- a/test/test_transforms_v2.py +++ 
b/test/test_transforms_v2.py @@ -3937,7 +3937,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_video, + pytest.param( + make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available") + ), + ], ) def test_functional(self, make_input): check_functional(F.gaussian_blur, make_input(), kernel_size=(3, 3)) @@ -3949,6 +3957,11 @@ def test_functional(self, make_input): (F._misc._gaussian_blur_image_pil, PIL.Image.Image), (F.gaussian_blur_image, tv_tensors.Image), (F.gaussian_blur_video, tv_tensors.Video), + pytest.param( + F._misc._gaussian_blur_cvcuda, + cvcuda.Tensor, + marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available"), + ), ], ) def test_functional_signature(self, kernel, input_type): @@ -3956,7 +3969,17 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + pytest.param( + make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available") + ), + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) @pytest.mark.parametrize("sigma", [5, 2.0, (0.5, 2), [1.3, 2.7]]) @@ -4047,39 +4070,8 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp torch.testing.assert_close(actual, expected, rtol=0, atol=1) - -@pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") -@needs_cuda -class TestGaussianBlurCVCUDA: - def test_kernel_image_errors(self): - image = make_image_cvcuda(batch_dims=(1,)) - - with pytest.raises(ValueError, match="kernel_size is a sequence its length should be 2"): - F.gaussian_blur_cvcuda(image, 
kernel_size=[1, 2, 3]) - - for kernel_size in [2, -1]: - with pytest.raises(ValueError, match="kernel_size should have odd and positive integers"): - F.gaussian_blur_cvcuda(image, kernel_size=kernel_size) - - with pytest.raises(ValueError, match="sigma is a sequence, its length should be 2"): - F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=[1, 2, 3]) - - with pytest.raises(TypeError, match="sigma should be either float or sequence of floats"): - F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=object()) - - with pytest.raises(ValueError, match="sigma should have positive values"): - F.gaussian_blur_cvcuda(image, kernel_size=1, sigma=-1) - - def test_functional(self): - check_functional(F.gaussian_blur, make_image_cvcuda(batch_dims=(1,)), kernel_size=(3, 3)) - - @pytest.mark.parametrize("device", cpu_and_cuda()) - @pytest.mark.parametrize("sigma", [5, 2.0, (0.5, 2), [1.3, 2.7]]) - def test_transform(self, device, sigma): - check_transform( - transforms.GaussianBlur(kernel_size=3, sigma=sigma), make_image_cvcuda(batch_dims=(1,), device=device) - ) - + @pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") + @needs_cuda @pytest.mark.parametrize( ("dimensions", "kernel_size", "sigma"), [ @@ -4093,7 +4085,7 @@ def test_transform(self, device, sigma): @pytest.mark.parametrize("color_space", ["RGB", "GRAY"]) @pytest.mark.parametrize("batch_dims", [(1,), (2,), (4,)]) @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) - def test_functional_image_correctness(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): + def test_functional_cvcuda_parity(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): height, width = dimensions image_tensor = make_image( @@ -4102,7 +4094,7 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, colo image_cvcuda = F.to_cvcuda_tensor(image_tensor) expected = F.gaussian_blur_image(image_tensor, kernel_size=kernel_size, sigma=sigma) - actual = 
F.gaussian_blur_cvcuda(image_cvcuda, kernel_size=kernel_size, sigma=sigma) + actual = F._misc._gaussian_blur_cvcuda(image_cvcuda, kernel_size=kernel_size, sigma=sigma) actual_torch = F.cvcuda_to_tensor(actual) if dtype.is_floating_point: diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index e948118a8bc..032a993b1f0 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -147,7 +147,6 @@ from ._misc import ( convert_image_dtype, gaussian_blur, - gaussian_blur_cvcuda, gaussian_blur_image, gaussian_blur_video, gaussian_noise, diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index eb3773e47d6..59b11da43f5 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -110,7 +110,7 @@ def _validate_kernel_size_and_sigma( kernel_size: Sequence[int] | int, sigma: Sequence[float | int] | float | int | None = None, ) -> tuple[list[int], list[float]]: - # TODO: consider deprecating integers from sigma on the future + # duplicated logic from gaussian_blur_image for use in gaussian_blur_cvcuda if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] elif len(kernel_size) != 2: @@ -146,7 +146,33 @@ def _validate_kernel_size_and_sigma( def gaussian_blur_image( image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None ) -> torch.Tensor: - kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma) + # TODO: consider deprecating integers from sigma on the future + if isinstance(kernel_size, int): + kernel_size = [kernel_size, kernel_size] + elif len(kernel_size) != 2: + raise ValueError(f"If kernel_size is a sequence its length should be 2. 
Got {len(kernel_size)}") + for ksize in kernel_size: + if ksize % 2 == 0 or ksize < 0: + raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}") + + if sigma is None: + sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] + else: + if isinstance(sigma, (list, tuple)): + length = len(sigma) + if length == 1: + s = sigma[0] + sigma = [s, s] + elif length != 2: + raise ValueError(f"If sigma is a sequence, its length should be 2. Got {length}") + elif isinstance(sigma, (int, float)): + s = float(sigma) + sigma = [s, s] + else: + raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}") + for s in sigma: + if s <= 0.0: + raise ValueError(f"sigma should have positive values. Got {sigma}") if image.numel() == 0: return image @@ -197,11 +223,11 @@ def gaussian_blur_video( return gaussian_blur_image(video, kernel_size, sigma) -def gaussian_blur_cvcuda( - image: "cvcuda.Tensor", - kernel_size: Sequence[int] | int, - sigma: Sequence[float | int] | float | int | None = None, +def _gaussian_blur_cvcuda( + image: "cvcuda.Tensor", kernel_size: list[int], sigma: Optional[list[float]] = None ) -> "cvcuda.Tensor": + cvcuda = _import_cvcuda() + kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma) return cvcuda.gaussian( @@ -213,7 +239,9 @@ def gaussian_blur_cvcuda( if CVCUDA_AVAILABLE: - _gaussian_blur_cvcuda = _register_kernel_internal(gaussian_blur, cvcuda.Tensor)(gaussian_blur_cvcuda) + _gaussian_blur_cvcuda_registered = _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)( + _gaussian_blur_cvcuda + ) def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor: From 5df3a7d7c6be7b7f2712bd51741aa87afa935a28 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 24 Nov 2025 12:37:53 -0800 Subject: [PATCH 10/18] consolidate gaussian_blur_image to use new validate_kernel_size_and_sigma --- 
torchvision/transforms/v2/functional/_misc.py | 36 +++---------------- 1 file changed, 5 insertions(+), 31 deletions(-) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 59b11da43f5..db5c86fa424 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -1,5 +1,5 @@ import math -from typing import Optional, Sequence, TYPE_CHECKING +from typing import List, Optional, TYPE_CHECKING, Union import PIL.Image import torch @@ -107,10 +107,10 @@ def _get_gaussian_kernel2d( def _validate_kernel_size_and_sigma( - kernel_size: Sequence[int] | int, - sigma: Sequence[float | int] | float | int | None = None, + kernel_size: List[int] | int, + sigma: Optional[Union[List[float], float, int]] = None, ) -> tuple[list[int], list[float]]: - # duplicated logic from gaussian_blur_image for use in gaussian_blur_cvcuda + # TODO: consider deprecating integers from sigma on the future if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] elif len(kernel_size) != 2: @@ -146,33 +146,7 @@ def _validate_kernel_size_and_sigma( def gaussian_blur_image( image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None ) -> torch.Tensor: - # TODO: consider deprecating integers from sigma on the future - if isinstance(kernel_size, int): - kernel_size = [kernel_size, kernel_size] - elif len(kernel_size) != 2: - raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}") - for ksize in kernel_size: - if ksize % 2 == 0 or ksize < 0: - raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}") - - if sigma is None: - sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] - else: - if isinstance(sigma, (list, tuple)): - length = len(sigma) - if length == 1: - s = sigma[0] - sigma = [s, s] - elif length != 2: - raise ValueError(f"If sigma is a sequence, its length should be 2. 
Got {length}") - elif isinstance(sigma, (int, float)): - s = float(sigma) - sigma = [s, s] - else: - raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}") - for s in sigma: - if s <= 0.0: - raise ValueError(f"sigma should have positive values. Got {sigma}") + kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma) if image.numel() == 0: return image From 9cd75823f7288a060bf8f9fc774c0336379feff6 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 24 Nov 2025 12:49:47 -0800 Subject: [PATCH 11/18] resolve more review comments --- test/test_transforms_v2.py | 5 ++++- torchvision/transforms/v2/functional/_misc.py | 8 +++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index b08c2d3fcd5..449708fd25f 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4085,7 +4085,7 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp @pytest.mark.parametrize("color_space", ["RGB", "GRAY"]) @pytest.mark.parametrize("batch_dims", [(1,), (2,), (4,)]) @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) - def test_functional_cvcuda_parity(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): + def test_functional_cvcuda_correctness(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): height, width = dimensions image_tensor = make_image( @@ -4098,6 +4098,9 @@ def test_functional_cvcuda_parity(self, dimensions, kernel_size, sigma, color_sp actual_torch = F.cvcuda_to_tensor(actual) if dtype.is_floating_point: + # floating point precision differences between torch and cvcuda are present + # observed greatest absolute error is 0.3 + # most likely stemming from different implementation torch.testing.assert_close(actual_torch, expected, rtol=0, atol=0.3) else: # uint8/16 gaussians can differ by up to max-value, most likely an overflow issue diff --git 
a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index db5c86fa424..2888e242e23 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -1,5 +1,5 @@ import math -from typing import List, Optional, TYPE_CHECKING, Union +from typing import List, Optional, Tuple, TYPE_CHECKING, Union import PIL.Image import torch @@ -109,7 +109,7 @@ def _get_gaussian_kernel2d( def _validate_kernel_size_and_sigma( kernel_size: List[int] | int, sigma: Optional[Union[List[float], float, int]] = None, -) -> tuple[list[int], list[float]]: +) -> Tuple[List[int], List[float]]: # TODO: consider deprecating integers from sigma on the future if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] @@ -213,9 +213,7 @@ def _gaussian_blur_cvcuda( if CVCUDA_AVAILABLE: - _gaussian_blur_cvcuda_registered = _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)( - _gaussian_blur_cvcuda - ) + _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)(_gaussian_blur_cvcuda) def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor: From d9e3f839c2f1ccadfbf3b4d3e8db28514279d069 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 24 Nov 2025 12:52:24 -0800 Subject: [PATCH 12/18] match type hint in validate to gaussian_blur_image --- torchvision/transforms/v2/functional/_misc.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 2888e242e23..877898605f1 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -1,5 +1,5 @@ import math -from typing import List, Optional, Tuple, TYPE_CHECKING, Union +from typing import List, Optional, Tuple, TYPE_CHECKING import PIL.Image import torch @@ -107,8 +107,8 @@ def _get_gaussian_kernel2d( def 
_validate_kernel_size_and_sigma( - kernel_size: List[int] | int, - sigma: Optional[Union[List[float], float, int]] = None, + kernel_size: List[int], + sigma: Optional[List[float]] = None, ) -> Tuple[List[int], List[float]]: # TODO: consider deprecating integers from sigma on the future if isinstance(kernel_size, int): From ccd4c1d04d19c057f2bec4f33624d732dd85c180 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Tue, 25 Nov 2025 16:00:40 -0800 Subject: [PATCH 13/18] resolve tests failing due to cvcuda.Tensor --- test/test_transforms_v2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 449708fd25f..5ccb2fcee4e 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -3959,12 +3959,14 @@ def test_functional(self, make_input): (F.gaussian_blur_video, tv_tensors.Video), pytest.param( F._misc._gaussian_blur_cvcuda, - cvcuda.Tensor, + "cvcuda.Tensor", marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available"), ), ], ) def test_functional_signature(self, kernel, input_type): + if input_type == "cvcuda.Tensor": + input_type = _import_cvcuda().Tensor check_functional_kernel_signature_match(F.gaussian_blur, kernel=kernel, input_type=input_type) @pytest.mark.parametrize( From b3d7814c869496a35a117566963ee49389cb1a4c Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Wed, 26 Nov 2025 09:39:20 -0800 Subject: [PATCH 14/18] fix gaussian border mode to adhere to torch/opencv --- test/test_transforms_v2.py | 59 +++++++------------ torchvision/transforms/v2/functional/_misc.py | 2 +- 2 files changed, 22 insertions(+), 39 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 5ccb2fcee4e..1ec7c0fb99b 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4044,11 +4044,22 @@ def test_make_params(self, sigma): ((1, 26, 28), (23, 23), 1.7), ], ) - @pytest.mark.parametrize("dtype", [torch.float32, torch.float64,
torch.float16]) + @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32, torch.float64, torch.float16]) @pytest.mark.parametrize("device", cpu_and_cuda()) - def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtype, device): + @pytest.mark.parametrize( + "input_type", + [ + tv_tensors.Image, + pytest.param( + "cvcuda.Tensor", marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available") + ), + ], + ) + def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtype, device, input_type): if dtype is torch.float16 and device == "cpu": pytest.skip("The CPU implementation of float16 on CPU differs from opencv") + if (dtype != torch.float32 and dtype != torch.uint8) and input_type == "cvcuda.Tensor": + pytest.skip("CVCUDA does not support non-float32 or uint8 dtypes for gaussian blur") num_channels, height, width = dimensions @@ -4068,45 +4079,17 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp device=device, ) - actual = F.gaussian_blur_image(image, kernel_size=kernel_size, sigma=sigma) - - torch.testing.assert_close(actual, expected, rtol=0, atol=1) - - @pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") - @needs_cuda - @pytest.mark.parametrize( - ("dimensions", "kernel_size", "sigma"), - [ - ((10, 12), (3, 3), 0.8), - ((10, 12), (3, 3), 0.5), - ((10, 12), (3, 5), 0.8), - ((10, 12), (3, 5), 0.5), - ((26, 28), (23, 23), 1.7), - ], - ) - @pytest.mark.parametrize("color_space", ["RGB", "GRAY"]) - @pytest.mark.parametrize("batch_dims", [(1,), (2,), (4,)]) - @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) - def test_functional_cvcuda_correctness(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype): - height, width = dimensions + if input_type == "cvcuda.Tensor": + image = image.unsqueeze(0) + image = F.to_cvcuda_tensor(image) - image_tensor = make_image( - size=(height, width), color_space=color_space, 
batch_dims=batch_dims, dtype=dtype, device="cuda" - ) - image_cvcuda = F.to_cvcuda_tensor(image_tensor) + actual = F.gaussian_blur(image, kernel_size=kernel_size, sigma=sigma) - expected = F.gaussian_blur_image(image_tensor, kernel_size=kernel_size, sigma=sigma) - actual = F._misc._gaussian_blur_cvcuda(image_cvcuda, kernel_size=kernel_size, sigma=sigma) - actual_torch = F.cvcuda_to_tensor(actual) + if input_type == "cvcuda.Tensor": + actual = F.cvcuda_to_tensor(actual) + actual = actual.squeeze(0).to(device=device) - if dtype.is_floating_point: - # floating point precision differences between torch and cvcuda are present - # observed greatest absolute error is 0.3 - # most likely stemming from different implementation - torch.testing.assert_close(actual_torch, expected, rtol=0, atol=0.3) - else: - # uint8/16 gaussians can differ by up to max-value, most likely an overflow issue - torch.testing.assert_close(actual_torch, expected, rtol=0, atol=get_max_value(dtype)) + torch.testing.assert_close(actual, expected, rtol=0, atol=1) class TestGaussianNoise: diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 877898605f1..dbb10b02e8d 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -208,7 +208,7 @@ def _gaussian_blur_cvcuda( image, tuple(kernel_size), tuple(sigma), - border=cvcuda.Border.REFLECT, + border=cvcuda.Border.REFLECT101, ) From 160e047172c1f0de025a050e38f0f4d9d855045f Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 1 Dec 2025 18:34:11 -0800 Subject: [PATCH 15/18] remove unneeded cvcuda setup --- torchvision/transforms/v2/functional/_augment.py | 11 +---------- torchvision/transforms/v2/functional/_color.py | 12 +----------- torchvision/transforms/v2/functional/_geometry.py | 2 +- 3 files changed, 3 insertions(+), 22 deletions(-) diff --git a/torchvision/transforms/v2/functional/_augment.py 
b/torchvision/transforms/v2/functional/_augment.py index 7ce5bdc7b7e..a904d8d7cbd 100644 --- a/torchvision/transforms/v2/functional/_augment.py +++ b/torchvision/transforms/v2/functional/_augment.py @@ -1,5 +1,4 @@ import io -from typing import TYPE_CHECKING import PIL.Image @@ -9,15 +8,7 @@ from torchvision.transforms.functional import pil_to_tensor, to_pil_image from torchvision.utils import _log_api_usage_once -from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal - - -CVCUDA_AVAILABLE = _is_cvcuda_available() - -if TYPE_CHECKING: - import cvcuda # type: ignore[import-not-found] -if CVCUDA_AVAILABLE: - cvcuda = _import_cvcuda() # noqa: F811 +from ._utils import _get_kernel, _register_kernel_internal def erase( diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py index 5be9c62902a..be254c0d63a 100644 --- a/torchvision/transforms/v2/functional/_color.py +++ b/torchvision/transforms/v2/functional/_color.py @@ -1,5 +1,3 @@ -from typing import TYPE_CHECKING - import PIL.Image import torch from torch.nn.functional import conv2d @@ -11,15 +9,7 @@ from ._misc import _num_value_bits, to_dtype_image from ._type_conversion import pil_to_tensor, to_pil_image -from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal - - -CVCUDA_AVAILABLE = _is_cvcuda_available() - -if TYPE_CHECKING: - import cvcuda # type: ignore[import-not-found] -if CVCUDA_AVAILABLE: - cvcuda = _import_cvcuda() # noqa: F811 +from ._utils import _get_kernel, _register_kernel_internal def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor: diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 0e27218bc89..cb3ac70205a 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2,7 +2,7 @@ import numbers import 
warnings from collections.abc import Sequence -from typing import Any, Optional, TYPE_CHECKING, Union +from typing import Any, Optional, Union, TYPE_CHECKING import PIL.Image import torch From 5ce83b11bfef26f04cfb1b2417b6877a6762197e Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Mon, 1 Dec 2025 18:35:16 -0800 Subject: [PATCH 16/18] use assert_close --- test/test_transforms_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 1ec7c0fb99b..0f89643f65c 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4089,7 +4089,7 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp actual = F.cvcuda_to_tensor(actual) actual = actual.squeeze(0).to(device=device) - torch.testing.assert_close(actual, expected, rtol=0, atol=1) + assert_close(actual, expected, rtol=0, atol=1) class TestGaussianNoise: From 3bcc517569949c918569fdc619228fcdfe491a13 Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Thu, 4 Dec 2025 11:14:51 -0800 Subject: [PATCH 17/18] update gaussian blur with main standards --- test/test_transforms_v2.py | 3 +-- torchvision/transforms/v2/_misc.py | 6 ++++++ torchvision/transforms/v2/_utils.py | 8 ++++---- torchvision/transforms/v2/functional/_misc.py | 4 ++-- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 0f89643f65c..8bf2bfa495d 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -25,7 +25,6 @@ assert_equal, cache, cpu_and_cuda, - cvcuda_to_pil_compatible_tensor, freeze_rng_state, ignore_jit_no_profile_information_warning, make_bounding_boxes, @@ -3958,7 +3957,7 @@ def test_functional(self, make_input): (F.gaussian_blur_image, tv_tensors.Image), (F.gaussian_blur_video, tv_tensors.Video), pytest.param( - F._misc._gaussian_blur_cvcuda, + F._misc._gaussian_blur_image_cvcuda, "cvcuda.Tensor", marks=pytest.mark.skipif(not 
CVCUDA_AVAILABLE, reason="CVCUDA is not available"), ), diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index 305149c87b1..c09e04b0834 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,6 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform +from torchvision.transforms.v2.functional._utils import _is_cvcuda_available, _is_cvcuda_tensor from ._utils import ( _parse_labels_getter, @@ -20,6 +21,8 @@ is_pure_tensor, ) +CVCUDA_AVAILABLE = _is_cvcuda_available() + # TODO: do we want/need to expose this? class Identity(Transform): @@ -192,6 +195,9 @@ class GaussianBlur(Transform): _v1_transform_cls = _transforms.GaussianBlur + if CVCUDA_AVAILABLE: + _transformed_types = Transform._transformed_types + (_is_cvcuda_tensor,) + def __init__( self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0) ) -> None: diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 3fc33ce5964..e803aa49c60 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -15,8 +15,8 @@ from torchvision._utils import sequence_to_str from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401 -from torchvision.transforms.v2.functional import get_dimensions, get_size, is_cvcuda_tensor, is_pure_tensor -from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT +from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor +from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT, _is_cvcuda_tensor def _setup_number_or_seq(arg: int | float | Sequence[int | float], name: str) -> Sequence[float]: @@ -182,7 +182,7 @@ def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]: chws = { tuple(get_dimensions(inpt)) for 
inpt in flat_inputs - if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, is_cvcuda_tensor)) + if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, _is_cvcuda_tensor)) } if not chws: raise TypeError("No image or video was found in the sample") @@ -207,7 +207,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]: tv_tensors.Mask, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, - is_cvcuda_tensor, + _is_cvcuda_tensor, ), ) } diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index dbb10b02e8d..1a7a3b53ac8 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -197,7 +197,7 @@ def gaussian_blur_video( return gaussian_blur_image(video, kernel_size, sigma) -def _gaussian_blur_cvcuda( +def _gaussian_blur_image_cvcuda( image: "cvcuda.Tensor", kernel_size: list[int], sigma: Optional[list[float]] = None ) -> "cvcuda.Tensor": cvcuda = _import_cvcuda() @@ -213,7 +213,7 @@ def _gaussian_blur_cvcuda( if CVCUDA_AVAILABLE: - _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)(_gaussian_blur_cvcuda) + _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)(_gaussian_blur_image_cvcuda) def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor: From 2edfdffb425e933d2e5e579d68b7068b643863dd Mon Sep 17 00:00:00 2001 From: Justin Davis Date: Thu, 4 Dec 2025 13:44:54 -0800 Subject: [PATCH 18/18] check input type on kernel for signature test --- test/test_transforms_v2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 8bf2bfa495d..a5e394ec2ba 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -3958,13 +3958,13 @@ def test_functional(self, make_input): (F.gaussian_blur_video, tv_tensors.Video), pytest.param( 
F._misc._gaussian_blur_image_cvcuda, - "cvcuda.Tensor", + None, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available"), ), ], ) def test_functional_signature(self, kernel, input_type): - if input_type == "cvcuda.Tensor": + if kernel is F._misc._gaussian_blur_image_cvcuda: input_type = _import_cvcuda().Tensor check_functional_kernel_signature_match(F.gaussian_blur, kernel=kernel, input_type=input_type)