diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index 3ce603c3ed2..a5e394ec2ba 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -21,6 +21,7 @@ import torchvision.transforms.v2 as transforms
 
 from common_utils import (
+    assert_close,
     assert_equal,
     cache,
     cpu_and_cuda,
@@ -41,7 +42,6 @@
 )
 
 from torch import nn
-from torch.testing import assert_close
 from torch.utils._pytree import tree_flatten, tree_map
 from torch.utils.data import DataLoader, default_collate
 from torchvision import tv_tensors
@@ -3936,7 +3936,15 @@ def test_kernel_video(self):
 
     @pytest.mark.parametrize(
         "make_input",
-        [make_image_tensor, make_image_pil, make_image, make_video],
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            make_video,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available")
+            ),
+        ],
     )
     def test_functional(self, make_input):
         check_functional(F.gaussian_blur, make_input(), kernel_size=(3, 3))
@@ -3948,14 +3956,31 @@ def test_functional(self, make_input):
             (F._misc._gaussian_blur_image_pil, PIL.Image.Image),
             (F.gaussian_blur_image, tv_tensors.Image),
             (F.gaussian_blur_video, tv_tensors.Video),
+            pytest.param(
+                F._misc._gaussian_blur_image_cvcuda,
+                None,
+                marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available"),
+            ),
         ],
     )
     def test_functional_signature(self, kernel, input_type):
+        if kernel is F._misc._gaussian_blur_image_cvcuda:
+            input_type = _import_cvcuda().Tensor
         check_functional_kernel_signature_match(F.gaussian_blur, kernel=kernel, input_type=input_type)
 
     @pytest.mark.parametrize(
         "make_input",
-        [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video],
+        [
+            make_image_tensor,
+            make_image_pil,
+            make_image,
+            make_bounding_boxes,
+            make_segmentation_mask,
+            make_video,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA is not available")
+            ),
+        ],
     )
     @pytest.mark.parametrize("device", cpu_and_cuda())
     @pytest.mark.parametrize("sigma", [5, 2.0, (0.5, 2), [1.3, 2.7]])
@@ -4018,11 +4043,22 @@ def test_make_params(self, sigma):
             ((1, 26, 28), (23, 23), 1.7),
         ],
     )
-    @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.float16])
+    @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32, torch.float64, torch.float16])
     @pytest.mark.parametrize("device", cpu_and_cuda())
-    def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtype, device):
+    @pytest.mark.parametrize(
+        "input_type",
+        [
+            tv_tensors.Image,
+            pytest.param(
+                "cvcuda.Tensor", marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
+            ),
+        ],
+    )
+    def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtype, device, input_type):
         if dtype is torch.float16 and device == "cpu":
             pytest.skip("The CPU implementation of float16 on CPU differs from opencv")
+        if (dtype != torch.float32 and dtype != torch.uint8) and input_type == "cvcuda.Tensor":
+            pytest.skip("CVCUDA does not support non-float32 or uint8 dtypes for gaussian blur")
 
         num_channels, height, width = dimensions
 
@@ -4042,9 +4078,17 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp
             device=device,
         )
 
-        actual = F.gaussian_blur_image(image, kernel_size=kernel_size, sigma=sigma)
+        if input_type == "cvcuda.Tensor":
+            image = image.unsqueeze(0)
+            image = F.to_cvcuda_tensor(image)
 
-        torch.testing.assert_close(actual, expected, rtol=0, atol=1)
+        actual = F.gaussian_blur(image, kernel_size=kernel_size, sigma=sigma)
+
+        if input_type == "cvcuda.Tensor":
+            actual = F.cvcuda_to_tensor(actual)
+            actual = actual.squeeze(0).to(device=device)
+
+        assert_close(actual, expected, rtol=0, atol=1)
 
 
 class TestGaussianNoise:
diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py
index 305149c87b1..c09e04b0834 100644
--- a/torchvision/transforms/v2/_misc.py
+++ b/torchvision/transforms/v2/_misc.py
@@ -9,6 +9,7 @@
 
 from torchvision import transforms as _transforms, tv_tensors
 from torchvision.transforms.v2 import functional as F, Transform
+from torchvision.transforms.v2.functional._utils import _is_cvcuda_available, _is_cvcuda_tensor
 
 from ._utils import (
     _parse_labels_getter,
@@ -20,6 +21,8 @@
     is_pure_tensor,
 )
 
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
 
 # TODO: do we want/need to expose this?
 class Identity(Transform):
@@ -192,6 +195,9 @@ class GaussianBlur(Transform):
 
     _v1_transform_cls = _transforms.GaussianBlur
 
+    if CVCUDA_AVAILABLE:
+        _transformed_types = Transform._transformed_types + (_is_cvcuda_tensor,)
+
     def __init__(
         self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)
     ) -> None:
diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py
index bb6051b4e61..e803aa49c60 100644
--- a/torchvision/transforms/v2/_utils.py
+++ b/torchvision/transforms/v2/_utils.py
@@ -16,7 +16,7 @@
 
 from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size  # noqa: F401
 from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
-from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
+from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT, _is_cvcuda_tensor
 
 
 def _setup_number_or_seq(arg: int | float | Sequence[int | float], name: str) -> Sequence[float]:
@@ -182,7 +182,7 @@ def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]:
     chws = {
         tuple(get_dimensions(inpt))
         for inpt in flat_inputs
-        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video))
+        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, _is_cvcuda_tensor))
     }
     if not chws:
         raise TypeError("No image or video was found in the sample")
@@ -207,6 +207,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]:
                 tv_tensors.Mask,
                 tv_tensors.BoundingBoxes,
                 tv_tensors.KeyPoints,
+                _is_cvcuda_tensor,
             ),
         )
     }
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py
index 0e27218bc89..cb3ac70205a 100644
--- a/torchvision/transforms/v2/functional/_geometry.py
+++ b/torchvision/transforms/v2/functional/_geometry.py
@@ -2,7 +2,7 @@
 import numbers
 import warnings
 from collections.abc import Sequence
-from typing import Any, Optional, TYPE_CHECKING, Union
+from typing import Any, Optional, Union, TYPE_CHECKING
 
 import PIL.Image
 import torch
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 6b8f19f12f4..af03ad018d4 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -51,6 +51,16 @@ def get_dimensions_video(video: torch.Tensor) -> list[int]:
     return get_dimensions_image(video)
 
 
+def get_dimensions_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:
+    # CV-CUDA tensor is always in NHWC layout
+    # get_dimensions is CHW
+    return [image.shape[3], image.shape[1], image.shape[2]]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_dimensions, cvcuda.Tensor)(get_dimensions_image_cvcuda)
+
+
 def get_num_channels(inpt: torch.Tensor) -> int:
     if torch.jit.is_scripting():
         return get_num_channels_image(inpt)
@@ -87,6 +97,16 @@ def get_num_channels_video(video: torch.Tensor) -> int:
 get_image_num_channels = get_num_channels
 
 
+def get_num_channels_image_cvcuda(image: "cvcuda.Tensor") -> int:
+    # CV-CUDA tensor is always in NHWC layout
+    # get_num_channels is C
+    return image.shape[3]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_num_channels, cvcuda.Tensor)(get_num_channels_image_cvcuda)
+
+
 def get_size(inpt: torch.Tensor) -> list[int]:
     if torch.jit.is_scripting():
         return get_size_image(inpt)
@@ -125,7 +145,7 @@ def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:
 
 
 if CVCUDA_AVAILABLE:
-    _get_size_image_cvcuda = _register_kernel_internal(get_size, cvcuda.Tensor)(get_size_image_cvcuda)
+    _register_kernel_internal(get_size, _import_cvcuda().Tensor)(get_size_image_cvcuda)
 
 
 @_register_kernel_internal(get_size, tv_tensors.Video, tv_tensor_wrapper=False)
diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py
index daf263df046..1a7a3b53ac8 100644
--- a/torchvision/transforms/v2/functional/_misc.py
+++ b/torchvision/transforms/v2/functional/_misc.py
@@ -1,5 +1,5 @@
 import math
-from typing import Optional
+from typing import List, Optional, Tuple, TYPE_CHECKING
 
 import PIL.Image
 import torch
@@ -13,7 +13,14 @@
 
 from ._meta import _convert_bounding_box_format
 
-from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor
+from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal, is_pure_tensor
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811
 
 
 def normalize(
@@ -99,11 +106,10 @@ def _get_gaussian_kernel2d(
     return kernel2d
 
 
-@_register_kernel_internal(gaussian_blur, torch.Tensor)
-@_register_kernel_internal(gaussian_blur, tv_tensors.Image)
-def gaussian_blur_image(
-    image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None
-) -> torch.Tensor:
+def _validate_kernel_size_and_sigma(
+    kernel_size: List[int],
+    sigma: Optional[List[float]] = None,
+) -> Tuple[List[int], List[float]]:
     # TODO: consider deprecating integers from sigma on the future
     if isinstance(kernel_size, int):
         kernel_size = [kernel_size, kernel_size]
@@ -132,6 +138,16 @@ def gaussian_blur_image(
         if s <= 0.0:
             raise ValueError(f"sigma should have positive values. Got {sigma}")
 
+    return kernel_size, sigma
+
+
+@_register_kernel_internal(gaussian_blur, torch.Tensor)
+@_register_kernel_internal(gaussian_blur, tv_tensors.Image)
+def gaussian_blur_image(
+    image: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None
+) -> torch.Tensor:
+    kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma)
+
     if image.numel() == 0:
         return image
 
@@ -181,6 +197,25 @@ def gaussian_blur_video(
     return gaussian_blur_image(video, kernel_size, sigma)
 
 
+def _gaussian_blur_image_cvcuda(
+    image: "cvcuda.Tensor", kernel_size: list[int], sigma: Optional[list[float]] = None
+) -> "cvcuda.Tensor":
+    cvcuda = _import_cvcuda()
+
+    kernel_size, sigma = _validate_kernel_size_and_sigma(kernel_size, sigma)
+
+    return cvcuda.gaussian(
+        image,
+        tuple(kernel_size),
+        tuple(sigma),
+        border=cvcuda.Border.REFLECT101,
+    )
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(gaussian_blur, _import_cvcuda().Tensor)(_gaussian_blur_image_cvcuda)
+
+
 def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor:
     """See :class:`~torchvision.transforms.v2.GaussianNoise`"""
     if torch.jit.is_scripting():