7 changes: 0 additions & 7 deletions android/ops/CMakeLists.txt
@@ -14,13 +14,6 @@ file(GLOB VISION_SRCS
../../torchvision/csrc/ops/*.h
../../torchvision/csrc/ops/*.cpp)

-# Remove interpolate_aa sources as they are temporary code
-# see https://github.com/pytorch/vision/pull/3761
-# and IndexingUtils.h is unavailable on Android build
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.h")

add_library(${TARGET} SHARED
${VISION_SRCS}
)
7 changes: 0 additions & 7 deletions ios/CMakeLists.txt
@@ -11,13 +11,6 @@ file(GLOB VISION_SRCS
../torchvision/csrc/ops/*.h
../torchvision/csrc/ops/*.cpp)

-# Remove interpolate_aa sources as they are temporary code
-# see https://github.com/pytorch/vision/pull/3761
-# and using TensorIterator unavailable with iOS
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.cpp")
-list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.h")

add_library(${TARGET} STATIC
${VISION_SRCS}
)
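Note on the two build-script changes above: both drop the special-casing that excluded the temporary interpolate_aa sources (see the removed comments and https://github.com/pytorch/vision/pull/3761). With those sources gone from torchvision's csrc, antialiased resizing is served by PyTorch's own interpolate. Below is a minimal sketch of that native path, assuming a PyTorch build that exposes the antialias flag; the shapes and sizes are arbitrary.

```python
# Minimal sketch (not part of this PR): antialiased downscaling through
# PyTorch's native interpolate instead of torchvision's removed custom ops.
import torch
import torch.nn.functional as nnF

img = torch.rand(1, 3, 512, 512)  # arbitrary NCHW float tensor
out = nnF.interpolate(img, size=(64, 64), mode="bilinear",
                      align_corners=False, antialias=True)
print(out.shape)  # torch.Size([1, 3, 64, 64])
```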
35 changes: 9 additions & 26 deletions test/test_functional_tensor.py
@@ -3,6 +3,7 @@
import math
import os
import re
+from functools import partial
from typing import Sequence

import numpy as np
@@ -655,11 +656,13 @@ def test_resize_antialias(device, dt, size, interpolation):
def test_assert_resize_antialias(interpolation):

# Checks implementation on very large scales
-    # and catch TORCH_CHECK inside interpolate_aa_kernels.cu
+    # and catch TORCH_CHECK inside PyTorch implementation
torch.manual_seed(12)
-    tensor, pil_img = _create_data(1000, 1000, device="cuda")
+    tensor, _ = _create_data(1000, 1000, device="cuda")

with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
# Error message is not yet updated in pytorch nightly
# with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
with pytest.raises(RuntimeError, match=r"Too much shared memory required"):
F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)


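As the inline comments above note, the TORCH_CHECK being caught now lives inside PyTorch's implementation and its wording differs between releases ("Too much shared memory required" currently, "Provided interpolation parameters can not be handled" in a later nightly). The snippet below is a hedged sketch of a version-tolerant variant of that assertion; the combined regex, the float input, and the BILINEAR choice are assumptions rather than part of this PR, and it needs a CUDA device.

```python
# Sketch only: accept whichever TORCH_CHECK message the installed PyTorch
# build emits for an extreme antialiased downscale on CUDA.
import pytest
import torch
import torchvision.transforms.functional as F
from torchvision.transforms import InterpolationMode

tensor = torch.rand(1, 3, 1000, 1000, device="cuda")
msg = r"(Too much shared memory required|Provided interpolation parameters can not be handled)"
with pytest.raises(RuntimeError, match=msg):
    F.resize(tensor, size=(5, 5), interpolation=InterpolationMode.BILINEAR, antialias=True)
```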
@@ -674,32 +677,12 @@ def test_interpolate_antialias_backward(device, dt, size, interpolation):
return

torch.manual_seed(12)
-    if interpolation == BILINEAR:
-        forward_op = torch.ops.torchvision._interpolate_bilinear2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bilinear2d_aa_backward
-    elif interpolation == BICUBIC:
-        forward_op = torch.ops.torchvision._interpolate_bicubic2d_aa
-        backward_op = torch.ops.torchvision._interpolate_bicubic2d_aa_backward
-
-    class F(torch.autograd.Function):
-        @staticmethod
-        def forward(ctx, i):
-            result = forward_op(i, size, False)
-            ctx.save_for_backward(i, result)
-            return result
-
-        @staticmethod
-        def backward(ctx, grad_output):
-            i, result = ctx.saved_tensors
-            ishape = i.shape
-            oshape = result.shape[2:]
-            return backward_op(grad_output, oshape, ishape, False)
-
x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
-    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
+    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)


def check_functional_vs_PIL_vs_scripted(
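The rewritten backward test above no longer wires the removed torchvision ops through a hand-written autograd.Function; it gradchecks F.resize directly, with functools.partial pinning the non-tensor arguments. Below is a standalone sketch of that pattern, using torch.nn.functional.interpolate as a stand-in operation and a hypothetical helper name; it assumes a PyTorch build whose antialiased interpolate has a double-precision backward.

```python
# Sketch of the gradcheck-with-partial pattern used in the updated test.
# `antialiased_resize` is a hypothetical helper, not torchvision API.
import torch
from functools import partial

def antialiased_resize(x, size):
    return torch.nn.functional.interpolate(
        x, size=size, mode="bilinear", align_corners=False, antialias=True
    )

op = partial(antialiased_resize, size=(12, 12))  # pin the non-tensor argument
x = torch.rand(1, 3, 29, 32, dtype=torch.double, requires_grad=True)
assert torch.autograd.gradcheck(op, (x,), eps=1e-8, atol=1e-6, rtol=1e-6)
```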