Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
149 changes: 77 additions & 72 deletions test/test_functional_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -324,76 +324,6 @@ def test_pad(self):

self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs)

def test_resize(self):
# Legacy unittest-style version of the resize test (this PR removes it in
# favor of a standalone pytest-parametrized test_resize later in the file).
# Checks F.resize on tensors against the PIL reference: output size,
# approximate values (except NEAREST), scripted equality, and batches.
script_fn = torch.jit.script(F.resize)
tensor, pil_img = self._create_data(26, 36, device=self.device)
batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)

for dt in [None, torch.float32, torch.float64, torch.float16]:

if dt == torch.float16 and torch.device(self.device).type == "cpu":
# skip float16 on CPU case
continue

if dt is not None:
# This is a trivial cast to float of uint8 data to test all cases
tensor = tensor.to(dt)
batch_tensors = batch_tensors.to(dt)

for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
for max_size in (None, 33, 40, 1000):
if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
continue # unsupported, see assertRaises below
for interpolation in [BILINEAR, BICUBIC, NEAREST]:
resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

# PIL .size is (width, height); tensor shape is (C, H, W),
# hence the [::-1] reversal on the PIL side.
assert_equal(
resized_tensor.size()[1:],
resized_pil_img.size[::-1],
msg="{}, {}".format(size, interpolation),
)

if interpolation not in [NEAREST, ]:
# We can not check values if mode = NEAREST, as results are different
# E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
# E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
resized_tensor_f = resized_tensor
# cast uint8 results to float so they can be compared with the PIL image
if resized_tensor_f.dtype == torch.uint8:
resized_tensor_f = resized_tensor_f.to(torch.float)

# Pay attention to high tolerance for MAE
self.approxEqualTensorToPIL(
resized_tensor_f, resized_pil_img, tol=8.0, msg="{}, {}".format(size, interpolation)
)

if isinstance(size, int):
# torch.jit.script'ed resize needs a list, not a bare int
script_size = [size, ]
else:
script_size = size

resize_result = script_fn(tensor, size=script_size, interpolation=interpolation,
max_size=max_size)
assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))

self._test_fn_on_batch(
batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
)

# assert changed type warning
with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
res1 = F.resize(tensor, size=32, interpolation=2)
res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
assert_equal(res1, res2)

for img in (tensor, pil_img):
exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
with self.assertRaisesRegex(ValueError, exp_msg):
F.resize(img, size=(32, 34), max_size=35)
with self.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
F.resize(img, size=32, max_size=32)

def test_resized_crop(self):
# test values of F.resized_crop in several cases:
# 1) resize to the same size, crop to the same size => should be identity
Expand Down Expand Up @@ -868,18 +798,93 @@ def test_perspective_interpolation_warning(tester):
tester.assertTrue(res1.equal(res2))


@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize('size', [32, 26, [32, ], [32, 32], (32, 32), [26, 35]])
@pytest.mark.parametrize('max_size', [None, 34, 40, 1000])
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST])
def test_resize(device, dt, size, max_size, interpolation, tester):
    """Check F.resize on tensors against the PIL reference implementation.

    Verifies output spatial size, approximate pixel values (skipped for
    NEAREST, whose rounding differs between backends), scripted-vs-eager
    equality, and batched behavior.
    """
    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
        # unsupported combination: max_size requires size to specify a
        # single (smaller-edge) length — see test_resize_asserts
        return

    # NOTE(review): seeds the global RNG for this and subsequent tests in the
    # module; a conftest autouse fixture (or a local torch.Generator) would be
    # the cleaner long-term solution — see PR discussion.
    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = tester._create_data(26, 36, device=device)
    batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device)

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

    # PIL .size is (width, height); tensor shape is (C, H, W), hence [::-1]
    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    if interpolation not in [NEAREST, ]:
        # We can not check values if mode = NEAREST, as results are different
        # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
        resized_tensor_f = resized_tensor
        # cast uint8 results to float so they can be compared with the PIL image
        if resized_tensor_f.dtype == torch.uint8:
            resized_tensor_f = resized_tensor_f.to(torch.float)

        # Pay attention to high tolerance for MAE
        tester.approxEqualTensorToPIL(resized_tensor_f, resized_pil_img, tol=8.0)

    if isinstance(size, int):
        # torch.jit.script'ed resize needs a list, not a bare int
        script_size = [size, ]
    else:
        script_size = size

    resize_result = script_fn(
        tensor, size=script_size, interpolation=interpolation, max_size=max_size
    )
    assert_equal(resized_tensor, resize_result)

    tester._test_fn_on_batch(
        batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
    )


@pytest.mark.parametrize('device', cpu_and_gpu())
def test_resize_asserts(device, tester):
    """F.resize warns on legacy int interpolation and rejects invalid max_size."""
    tensor, pil_img = tester._create_data(26, 36, device=device)

    # A legacy integer interpolation code must emit a deprecation-style
    # warning while still behaving like the matching InterpolationMode.
    warn_pattern = r"Argument interpolation should be of type InterpolationMode"
    with pytest.warns(UserWarning, match=warn_pattern):
        res1 = F.resize(tensor, size=32, interpolation=2)

    res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
    assert_equal(res1, res2)

    for img in (tensor, pil_img):
        # max_size is only valid together with a single (smaller-edge) size,
        # and it must exceed that size.
        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
        with pytest.raises(ValueError, match=exp_msg):
            F.resize(img, size=(32, 34), max_size=35)
        with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"):
            F.resize(img, size=32, max_size=32)


@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]])
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC])
def test_resize_antialias(device, dt, size, interpolation, tester):

torch.manual_seed(12)

if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return

torch.manual_seed(12)
script_fn = torch.jit.script(F.resize)
tensor, pil_img = tester._create_data(320, 290, device=device)

Expand Down