From a7d19d7952c5c98027d369438c3b7b656ee06c13 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Tue, 1 Feb 2022 11:30:45 +0000
Subject: [PATCH 1/2] Removed custom ops for interp with AA

---
 test/test_functional_tensor.py              | 34 +++++----------------
 torchvision/transforms/functional_tensor.py | 10 ++----
 2 files changed, 11 insertions(+), 33 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index b4807c11f51..0e32ce143cb 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -3,6 +3,7 @@
 import math
 import os
 from typing import Sequence
+from functools import partial

 import numpy as np
 import pytest
@@ -637,11 +638,12 @@ def test_resize_antialias(device, dt, size, interpolation):

 def test_assert_resize_antialias(interpolation):
     # Checks implementation on very large scales
-    # and catch TORCH_CHECK inside interpolate_aa_kernels.cu
+    # and catch TORCH_CHECK inside PyTorch implementation
     torch.manual_seed(12)
-    tensor, pil_img = _create_data(1000, 1000, device="cuda")
+    tensor, _ = _create_data(1000, 1000, device="cuda")

-    with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
+    # with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
+    with pytest.raises(RuntimeError, match=r"Too much"):
         F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)


@@ -656,32 +658,12 @@ def test_interpolate_antialias_backward(device, dt, size, interpolation):
         return

     torch.manual_seed(12)
-    if interpolation == BILINEAR:
-        forward_op = torch.ops.torchvision._interpolate_bilinear2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bilinear2d_aa_backward
-    elif interpolation == BICUBIC:
-        forward_op = torch.ops.torchvision._interpolate_bicubic2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bicubic2d_aa_backward
-
-    class F(torch.autograd.Function):
-        @staticmethod
-        def forward(ctx, i):
-            result = forward_op(i, size, False)
-            ctx.save_for_backward(i, result)
-            return result
-
-        @staticmethod
-        def backward(ctx, grad_output):
-            i, result = ctx.saved_tensors
-            ishape = i.shape
-            oshape = result.shape[2:]
-            return backward_op(grad_output, oshape, ishape, False)
-
     x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

     x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)


 def check_functional_vs_PIL_vs_scripted(
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 4e20c19e45f..05e94e22536 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -550,13 +550,9 @@ def resize(

     # Define align_corners to avoid warnings
     align_corners = False if interpolation in ["bilinear", "bicubic"] else None
-    if antialias:
-        if interpolation == "bilinear":
-            img = torch.ops.torchvision._interpolate_bilinear2d_aa(img, [new_h, new_w], align_corners=False)
-        elif interpolation == "bicubic":
-            img = torch.ops.torchvision._interpolate_bicubic2d_aa(img, [new_h, new_w], align_corners=False)
-    else:
-        img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners)
+    img = interpolate(
+        img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners, antialias=antialias
+    )

     if interpolation == "bicubic" and out_dtype == torch.uint8:
         img = img.clamp(min=0, max=255)

From 7755a6b7e596a70e36b9311930b168307dd28218 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Tue, 1 Feb 2022 13:13:45 +0000
Subject: [PATCH 2/2] Fixed ufmt issues

---
 test/test_functional_tensor.py              | 2 +-
 torchvision/transforms/functional_tensor.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 1d71ed82669..db7b9c28765 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -2,8 +2,8 @@
 import itertools
 import math
 import os
-from typing import Sequence
 from functools import partial
+from typing import Sequence

 import numpy as np
 import pytest
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 05e94e22536..137d5d37f8f 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -550,9 +550,7 @@ def resize(

     # Define align_corners to avoid warnings
     align_corners = False if interpolation in ["bilinear", "bicubic"] else None
-    img = interpolate(
-        img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners, antialias=antialias
-    )
+    img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners, antialias=antialias)

     if interpolation == "bicubic" and out_dtype == torch.uint8:
         img = img.clamp(min=0, max=255)
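
Note (editor's sketch, not part of the patches above): with the custom torch.ops.torchvision._interpolate_bilinear2d_aa / _interpolate_bicubic2d_aa kernels removed, antialiased resizing goes through the antialias flag that torch.nn.functional.interpolate exposes in newer PyTorch builds. Because that path is already differentiable, the test can run gradcheck against the public F.resize via functools.partial instead of a hand-written autograd.Function. The sizes and interpolation mode in the sketch below are illustrative only:

    from functools import partial

    import torch
    from torch.nn.functional import interpolate
    from torchvision.transforms import InterpolationMode
    from torchvision.transforms import functional as F

    # Double-precision input, as gradcheck requires.
    img = torch.rand(1, 3, 32, 29, dtype=torch.double, requires_grad=True)

    # The core PyTorch call that torchvision's tensor resize now dispatches to.
    out = interpolate(img, size=[16, 12], mode="bilinear", align_corners=False, antialias=True)

    # No custom autograd.Function is needed anymore: gradcheck runs on the public API,
    # mirroring the updated test_interpolate_antialias_backward.
    resize = partial(F.resize, size=[16, 12], interpolation=InterpolationMode.BILINEAR, antialias=True)
    assert torch.autograd.gradcheck(resize, (img,), eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)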