From 5372b131605b3373d48a3752f5c9ddff6e272392 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Tue, 25 May 2021 07:12:56 +0000
Subject: [PATCH 1/3] Refactored and fixed flaky resize tests

---
 test/test_functional_tensor.py | 151 ++++++++++++++++++---------
 1 file changed, 81 insertions(+), 70 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 9b73308e3e1..a5229164aa6 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -324,76 +324,6 @@ def test_pad(self):
             self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs)

-    def test_resize(self):
-        script_fn = torch.jit.script(F.resize)
-        tensor, pil_img = self._create_data(26, 36, device=self.device)
-        batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
-
-        for dt in [None, torch.float32, torch.float64, torch.float16]:
-
-            if dt == torch.float16 and torch.device(self.device).type == "cpu":
-                # skip float16 on CPU case
-                continue
-
-            if dt is not None:
-                # This is a trivial cast to float of uint8 data to test all cases
-                tensor = tensor.to(dt)
-                batch_tensors = batch_tensors.to(dt)
-
-            for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
-                for max_size in (None, 33, 40, 1000):
-                    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
-                        continue  # unsupported, see assertRaises below
-                    for interpolation in [BILINEAR, BICUBIC, NEAREST]:
-                        resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
-                        resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)
-
-                        assert_equal(
-                            resized_tensor.size()[1:],
-                            resized_pil_img.size[::-1],
-                            msg="{}, {}".format(size, interpolation),
-                        )
-
-                        if interpolation not in [NEAREST, ]:
-                            # We can not check values if mode = NEAREST, as results are different
-                            # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
-                            # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
-                            resized_tensor_f = resized_tensor
-                            # we need to cast to uint8 to compare with PIL image
-                            if resized_tensor_f.dtype == torch.uint8:
-                                resized_tensor_f = resized_tensor_f.to(torch.float)
-
-                            # Pay attention to high tolerance for MAE
-                            self.approxEqualTensorToPIL(
-                                resized_tensor_f, resized_pil_img, tol=8.0, msg="{}, {}".format(size, interpolation)
-                            )
-
-                        if isinstance(size, int):
-                            script_size = [size, ]
-                        else:
-                            script_size = size
-
-                        resize_result = script_fn(tensor, size=script_size, interpolation=interpolation,
-                                                  max_size=max_size)
-                        assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))
-
-                        self._test_fn_on_batch(
-                            batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
-                        )
-
-        # assert changed type warning
-        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
-            res1 = F.resize(tensor, size=32, interpolation=2)
-            res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
-            assert_equal(res1, res2)
-
-        for img in (tensor, pil_img):
-            exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
-            with self.assertRaisesRegex(ValueError, exp_msg):
-                F.resize(img, size=(32, 34), max_size=35)
-            with self.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
-                F.resize(img, size=32, max_size=32)
-
     def test_resized_crop(self):
         # test values of F.resized_crop in several cases:
         # 1) resize to the same size, crop to the same size => should be identity
@@ -868,6 +798,87 @@ def test_perspective_interpolation_warning(tester):
     tester.assertTrue(res1.equal(res2))


+@pytest.mark.parametrize('device', cpu_and_gpu())
+@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
+@pytest.mark.parametrize('size', [32, 26, [32, ], [32, 32], (32, 32), [26, 35]])
+@pytest.mark.parametrize('max_size', [None, 34, 40, 1000])
+@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST])
+def test_resize(device, dt, size, max_size, interpolation, tester):
+
+    torch.manual_seed(12)
+    script_fn = torch.jit.script(F.resize)
+    tensor, pil_img = tester._create_data(26, 36, device=device)
+    batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device)
+
+    if dt == torch.float16 and device == "cpu":
+        # skip float16 on CPU case
+        return
+
+    if dt is not None:
+        # This is a trivial cast to float of uint8 data to test all cases
+        tensor = tensor.to(dt)
+        batch_tensors = batch_tensors.to(dt)
+
+    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
+        return  # unsupported, see assertRaises below
+
+    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
+    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)
+
+    assert_equal(
+        resized_tensor.size()[1:],
+        resized_pil_img.size[::-1],
+    )
+
+    if interpolation not in [NEAREST, ]:
+        # We can not check values if mode = NEAREST, as results are different
+        # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
+        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
+        resized_tensor_f = resized_tensor
+        # we need to cast to uint8 to compare with PIL image
+        if resized_tensor_f.dtype == torch.uint8:
+            resized_tensor_f = resized_tensor_f.to(torch.float)
+
+        # Pay attention to high tolerance for MAE
+        tester.approxEqualTensorToPIL(
+            resized_tensor_f, resized_pil_img, tol=8.0,
+            msg="{}, {}, {}".format(size, max_size, interpolation)
+        )
+
+    if isinstance(size, int):
+        script_size = [size, ]
+    else:
+        script_size = size
+
+    resize_result = script_fn(
+        tensor, size=script_size, interpolation=interpolation, max_size=max_size
+    )
+    assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))
+
+    tester._test_fn_on_batch(
+        batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
+    )
+
+
+@pytest.mark.parametrize('device', cpu_and_gpu())
+def test_resize_asserts(device, tester):
+
+    tensor, pil_img = tester._create_data(26, 36, device=device)
+
+    # assert changed type warning
+    with tester.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
+        res1 = F.resize(tensor, size=32, interpolation=2)
+        res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
+        assert_equal(res1, res2)
+
+    for img in (tensor, pil_img):
+        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
+        with tester.assertRaisesRegex(ValueError, exp_msg):
+            F.resize(img, size=(32, 34), max_size=35)
+        with tester.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
+            F.resize(img, size=32, max_size=32)
+
+
 @pytest.mark.parametrize('device', cpu_and_gpu())
 @pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
 @pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]])

From 2fc5b52216057c317d5f9c5f46e32af4d3088324 Mon Sep 17 00:00:00 2001
From: vfdev
Date: Tue, 25 May 2021 09:36:36 +0200
Subject: [PATCH 2/3] Update test/test_functional_tensor.py

Co-authored-by: Philip Meier
---
 test/test_functional_tensor.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index a5229164aa6..02c2eb072fc 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -825,10 +825,7 @@ def test_resize(device, dt, size, max_size, interpolation, tester):
     resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
     resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

-    assert_equal(
-        resized_tensor.size()[1:],
-        resized_pil_img.size[::-1],
-    )
+    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

     if interpolation not in [NEAREST, ]:
         # We can not check values if mode = NEAREST, as results are different

From 4f77906fe5fed462f3fc39b4a4f85e115bf62263 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Tue, 25 May 2021 08:21:55 +0000
Subject: [PATCH 3/3] Fixed according to the review

---
 test/test_functional_tensor.py | 35 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 19 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 02c2eb072fc..12a8d41914b 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -805,23 +805,23 @@ def test_perspective_interpolation_warning(tester):
 @pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST])
 def test_resize(device, dt, size, max_size, interpolation, tester):

+    if dt == torch.float16 and device == "cpu":
+        # skip float16 on CPU case
+        return
+
+    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
+        return  # unsupported
+
     torch.manual_seed(12)
     script_fn = torch.jit.script(F.resize)
     tensor, pil_img = tester._create_data(26, 36, device=device)
     batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device)

-    if dt == torch.float16 and device == "cpu":
-        # skip float16 on CPU case
-        return
-
     if dt is not None:
         # This is a trivial cast to float of uint8 data to test all cases
         tensor = tensor.to(dt)
         batch_tensors = batch_tensors.to(dt)

-    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
-        return  # unsupported, see assertRaises below
-
     resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
     resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

@@ -837,10 +837,7 @@ def test_resize(device, dt, size, max_size, interpolation, tester):
             resized_tensor_f = resized_tensor_f.to(torch.float)

         # Pay attention to high tolerance for MAE
-        tester.approxEqualTensorToPIL(
-            resized_tensor_f, resized_pil_img, tol=8.0,
-            msg="{}, {}, {}".format(size, max_size, interpolation)
-        )
+        tester.approxEqualTensorToPIL(resized_tensor_f, resized_pil_img, tol=8.0)

     if isinstance(size, int):
         script_size = [size, ]
     else:
         script_size = size

@@ -850,7 +847,7 @@ def test_resize(device, dt, size, max_size, interpolation, tester):
     resize_result = script_fn(
         tensor, size=script_size, interpolation=interpolation, max_size=max_size
     )
-    assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))
+    assert_equal(resized_tensor, resize_result)

     tester._test_fn_on_batch(
         batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
     )

@@ -863,16 +860,17 @@ def test_resize_asserts(device, tester):
     tensor, pil_img = tester._create_data(26, 36, device=device)

     # assert changed type warning
-    with tester.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
+    with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
         res1 = F.resize(tensor, size=32, interpolation=2)
-        res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
-        assert_equal(res1, res2)
+
+    res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
+    assert_equal(res1, res2)

     for img in (tensor, pil_img):
         exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
-        with tester.assertRaisesRegex(ValueError, exp_msg):
+        with pytest.raises(ValueError, match=exp_msg):
             F.resize(img, size=(32, 34), max_size=35)
-        with tester.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
+        with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"):
             F.resize(img, size=32, max_size=32)

@@ -882,12 +880,11 @@ def test_resize_asserts(device, tester):
 @pytest.mark.parametrize('device', cpu_and_gpu())
 @pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
 @pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]])
 @pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC])
 def test_resize_antialias(device, dt, size, interpolation, tester):

-    torch.manual_seed(12)
-
     if dt == torch.float16 and device == "cpu":
         # skip float16 on CPU case
         return

+    torch.manual_seed(12)
     script_fn = torch.jit.script(F.resize)
     tensor, pil_img = tester._create_data(320, 290, device=device)
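
Note on the `tester` argument used by the refactored tests: the new pytest-style functions receive a `tester` fixture in place of the old `self`, so helpers such as `_create_data`, `_create_data_batch`, `approxEqualTensorToPIL` and `_test_fn_on_batch` stay available during the unittest-to-pytest migration. The fixture definition is not part of these patches; the sketch below is only an illustration of what such a bridge fixture could look like, assuming the legacy unittest-style helper class (called `Tester` here, a hypothetical name) is defined in the same test module.

    import pytest

    @pytest.fixture()
    def tester():
        # Hypothetical bridge fixture: hand the function-style pytest tests an
        # instance of the legacy unittest-style helper class so its assertion
        # and data-creation helpers keep working.
        return Tester()

With a fixture like this in place, the parametrized tests can be run on their own, e.g. `pytest test/test_functional_tensor.py -k "test_resize"`.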