From 18cc1a93d43fa6acd65a2ead5296424136778ea8 Mon Sep 17 00:00:00 2001
From: vfdev-5
Date: Thu, 22 Oct 2020 13:05:19 +0200
Subject: [PATCH] Improved test of Resize on PIL images

---
 test/test_transforms.py | 95 +++++++++++++++++++++++------------------
 1 file changed, 53 insertions(+), 42 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index b8045703267..f9add6d1b57 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -215,53 +215,64 @@ def test_randomperspective_fill(self):
             F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands))
 
     def test_resize(self):
-        height = random.randint(24, 32) * 2
-        width = random.randint(24, 32) * 2
-        osize = random.randint(5, 12) * 2
-        # TODO: Check output size check for bug-fix, improve this later
-        t = transforms.Resize(osize)
-        self.assertTrue(isinstance(t.size, int))
-        self.assertEqual(t.size, osize)
+        input_sizes = [
+            # height, width
+            # square image
+            (28, 28),
+            (27, 27),
+            # rectangular image: h < w
+            (28, 34),
+            (29, 35),
+            # rectangular image: h > w
+            (34, 28),
+            (35, 29),
+        ]
+        test_output_sizes_1 = [
+            # single integer
+            22, 27, 28, 36,
+            # single integer in tuple/list
+            [22, ], (27, ),
+        ]
+        test_output_sizes_2 = [
+            # two integers
+            [22, 22], [22, 28], [22, 36],
+            [27, 22], [36, 22], [28, 28],
+            [28, 37], [37, 27], [37, 37]
+        ]
+
+        for height, width in input_sizes:
+            img = Image.new("RGB", size=(width, height), color=127)
+
+            for osize in test_output_sizes_1:
+
+                t = transforms.Resize(osize)
+                result = t(img)
+
+                msg = "{}, {} - {}".format(height, width, osize)
+                osize = osize[0] if isinstance(osize, (list, tuple)) else osize
+                # If size is an int, the smaller edge of the image will be matched to this number.
+                # i.e., if height > width, then the image will be rescaled to (size * height / width, size).
+                if height < width:
+                    expected_size = (int(osize * width / height), osize)  # (w, h)
+                    self.assertEqual(result.size, expected_size, msg=msg)
+                elif width < height:
+                    expected_size = (osize, int(osize * height / width))  # (w, h)
+                    self.assertEqual(result.size, expected_size, msg=msg)
+                else:
+                    expected_size = (osize, osize)  # (w, h)
+                    self.assertEqual(result.size, expected_size, msg=msg)
 
-        img = torch.ones(3, height, width)
-        result = transforms.Compose([
-            transforms.ToPILImage(),
-            transforms.Resize(osize),
-            transforms.ToTensor(),
-        ])(img)
-        self.assertIn(osize, result.size())
-        if height < width:
-            self.assertLessEqual(result.size(1), result.size(2))
-        elif width < height:
-            self.assertGreaterEqual(result.size(1), result.size(2))
+        for height, width in input_sizes:
+            img = Image.new("RGB", size=(width, height), color=127)
 
-        result = transforms.Compose([
-            transforms.ToPILImage(),
-            transforms.Resize([osize, osize]),
-            transforms.ToTensor(),
-        ])(img)
-        self.assertIn(osize, result.size())
-        self.assertEqual(result.size(1), osize)
-        self.assertEqual(result.size(2), osize)
+            for osize in test_output_sizes_2:
+                oheight, owidth = osize
 
-        oheight = random.randint(5, 12) * 2
-        owidth = random.randint(5, 12) * 2
-        result = transforms.Compose([
-            transforms.ToPILImage(),
-            transforms.Resize((oheight, owidth)),
-            transforms.ToTensor(),
-        ])(img)
-        self.assertEqual(result.size(1), oheight)
-        self.assertEqual(result.size(2), owidth)
+                t = transforms.Resize(osize)
+                result = t(img)
 
-        result = transforms.Compose([
-            transforms.ToPILImage(),
-            transforms.Resize([oheight, owidth]),
-            transforms.ToTensor(),
-        ])(img)
-        self.assertEqual(result.size(1), oheight)
-        self.assertEqual(result.size(2), owidth)
+                self.assertEqual((owidth, oheight), result.size)
 
     def test_random_crop(self):
         height = random.randint(10, 32) * 2
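
A minimal standalone sketch of the smaller-edge semantics the updated test asserts, assuming torchvision and Pillow are importable; the input size (34, 28) and the target size 22 are illustrative, taken from the test's input_sizes and test_output_sizes_1 lists:

    from PIL import Image
    from torchvision import transforms

    # PIL reports size as (width, height); build a 34x28 image, so h < w.
    img = Image.new("RGB", size=(34, 28), color=127)

    # With an int size, Resize matches the smaller edge (height = 28) to 22
    # and preserves aspect ratio, so the expected output width is
    # int(22 * 34 / 28) = 26.
    result = transforms.Resize(22)(img)
    assert result.size == (int(22 * 34 / 28), 22)  # (26, 22)

For two-element sizes, Resize takes (h, w) while PIL's Image.size returns (w, h), which is why the second loop in the patch asserts (owidth, oheight) against result.size.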