From 603879fdf582718dfafa0928cca0533f82537093 Mon Sep 17 00:00:00 2001 From: harish Date: Sun, 6 Jun 2021 22:25:38 +0530 Subject: [PATCH 01/12] Group G,Group D test_five_crop test_ten_crop test_max_value test_linear_transformation test_autoaugment test_center_crop test_center_crop_2 test_color_jitter --- test/test_transforms.py | 488 ++++++++++++++++++++-------------------- 1 file changed, 248 insertions(+), 240 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index fe73b3c32ae..d53cb3bf803 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -32,176 +32,6 @@ class Tester(unittest.TestCase): - def test_center_crop(self): - height = random.randint(10, 32) * 2 - width = random.randint(10, 32) * 2 - oheight = random.randint(5, (height - 2) / 2) * 2 - owidth = random.randint(5, (width - 2) / 2) * 2 - - img = torch.ones(3, height, width) - oh1 = (height - oheight) // 2 - ow1 = (width - owidth) // 2 - imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth] - imgnarrow.fill_(0) - result = transforms.Compose([ - transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), - transforms.ToTensor(), - ])(img) - self.assertEqual(result.sum(), 0, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) - oheight += 1 - owidth += 1 - result = transforms.Compose([ - transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), - transforms.ToTensor(), - ])(img) - sum1 = result.sum() - self.assertGreater(sum1, 1, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) - oheight += 1 - owidth += 1 - result = transforms.Compose([ - transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), - transforms.ToTensor(), - ])(img) - sum2 = result.sum() - self.assertGreater(sum2, 0, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) - self.assertGreater(sum2, sum1, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) - - def test_center_crop_2(self): - """ Tests when center crop size is larger than image size, along any dimension""" - even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) - odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1) - - # Since height is independent of width, we can ignore images with odd height and even width and vice-versa. 
- input_image_sizes = [even_image_size, odd_image_size] - - # Get different crop sizes - delta = random.choice((1, 3, 5)) - crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta] - crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta) - - for (input_image_size, delta_height, delta_width) in crop_size_params: - img = torch.ones(3, *input_image_size) - crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width) - - # Test both transforms, one with PIL input and one with tensor - output_pil = transforms.Compose([ - transforms.ToPILImage(), - transforms.CenterCrop(crop_size), - transforms.ToTensor()], - )(img) - self.assertEqual(output_pil.size()[1:3], crop_size, - "image_size: {} crop_size: {}".format(input_image_size, crop_size)) - - output_tensor = transforms.CenterCrop(crop_size)(img) - self.assertEqual(output_tensor.size()[1:3], crop_size, - "image_size: {} crop_size: {}".format(input_image_size, crop_size)) - - # Ensure output for PIL and Tensor are equal - assert_equal( - output_tensor, output_pil, check_stride=False, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) - ) - - # Check if content in center of both image and cropped output is same. - center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) - crop_center_tl, input_center_tl = [0, 0], [0, 0] - for index in range(2): - if crop_size[index] > input_image_size[index]: - crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 - else: - input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 - - output_center = output_pil[ - :, - crop_center_tl[0]:crop_center_tl[0] + center_size[0], - crop_center_tl[1]:crop_center_tl[1] + center_size[1] - ] - - img_center = img[ - :, - input_center_tl[0]:input_center_tl[0] + center_size[0], - input_center_tl[1]:input_center_tl[1] + center_size[1] - ] - - assert_equal( - output_center, img_center, check_stride=False, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) - ) - - def test_five_crop(self): - to_pil_image = transforms.ToPILImage() - h = random.randint(5, 25) - w = random.randint(5, 25) - for single_dim in [True, False]: - crop_h = random.randint(1, h) - crop_w = random.randint(1, w) - if single_dim: - crop_h = min(crop_h, crop_w) - crop_w = crop_h - transform = transforms.FiveCrop(crop_h) - else: - transform = transforms.FiveCrop((crop_h, crop_w)) - - img = torch.FloatTensor(3, h, w).uniform_() - results = transform(to_pil_image(img)) - - self.assertEqual(len(results), 5) - for crop in results: - self.assertEqual(crop.size, (crop_w, crop_h)) - - to_pil_image = transforms.ToPILImage() - tl = to_pil_image(img[:, 0:crop_h, 0:crop_w]) - tr = to_pil_image(img[:, 0:crop_h, w - crop_w:]) - bl = to_pil_image(img[:, h - crop_h:, 0:crop_w]) - br = to_pil_image(img[:, h - crop_h:, w - crop_w:]) - center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img)) - expected_output = (tl, tr, bl, br, center) - self.assertEqual(results, expected_output) - - def test_ten_crop(self): - to_pil_image = transforms.ToPILImage() - h = random.randint(5, 25) - w = random.randint(5, 25) - for should_vflip in [True, False]: - for single_dim in [True, False]: - crop_h = random.randint(1, h) - crop_w = random.randint(1, w) - if single_dim: - crop_h = min(crop_h, crop_w) - crop_w = crop_h - transform = transforms.TenCrop(crop_h, - vertical_flip=should_vflip) - five_crop = transforms.FiveCrop(crop_h) - else: - transform = 
transforms.TenCrop((crop_h, crop_w), - vertical_flip=should_vflip) - five_crop = transforms.FiveCrop((crop_h, crop_w)) - - img = to_pil_image(torch.FloatTensor(3, h, w).uniform_()) - results = transform(img) - expected_output = five_crop(img) - - # Checking if FiveCrop and TenCrop can be printed as string - transform.__repr__() - five_crop.__repr__() - - if should_vflip: - vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM) - expected_output += five_crop(vflipped_img) - else: - hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT) - expected_output += five_crop(hflipped_img) - - self.assertEqual(len(results), 10) - self.assertEqual(results, expected_output) - def test_randomresized_params(self): height = random.randint(24, 32) * 2 width = random.randint(24, 32) * 2 @@ -451,15 +281,6 @@ def test_to_tensor_with_other_default_dtypes(self): torch.set_default_dtype(current_def_dtype) - def test_max_value(self): - for dtype in int_dtypes(): - self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max) - - # remove float testing as it can lead to errors such as - # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' - # for dtype in float_dtypes(): - # self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max) - def test_convert_image_dtype_float_to_float(self): for input_dtype, output_dtypes in cycle_over(float_dtypes()): input_image = torch.tensor((0.0, 1.0), dtype=input_dtype) @@ -1026,57 +847,6 @@ def test_normalize_3d_tensor(self): torch.testing.assert_close(target, result1) torch.testing.assert_close(target, result2) - def test_color_jitter(self): - color_jitter = transforms.ColorJitter(2, 2, 2, 0.1) - - x_shape = [2, 2, 3] - x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] - x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) - x_pil = Image.fromarray(x_np, mode='RGB') - x_pil_2 = x_pil.convert('L') - - for i in range(10): - y_pil = color_jitter(x_pil) - self.assertEqual(y_pil.mode, x_pil.mode) - - y_pil_2 = color_jitter(x_pil_2) - self.assertEqual(y_pil_2.mode, x_pil_2.mode) - - # Checking if ColorJitter can be printed as string - color_jitter.__repr__() - - def test_linear_transformation(self): - num_samples = 1000 - x = torch.randn(num_samples, 3, 10, 10) - flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3)) - # compute principal components - sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0) - u, s, _ = np.linalg.svd(sigma.numpy()) - zca_epsilon = 1e-10 # avoid division by 0 - d = torch.Tensor(np.diag(1. 
/ np.sqrt(s + zca_epsilon))) - u = torch.Tensor(u) - principal_components = torch.mm(torch.mm(u, d), u.t()) - mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0)) - # initialize whitening matrix - whitening = transforms.LinearTransformation(principal_components, mean_vector) - # estimate covariance and mean using weak law of large number - num_features = flat_x.size(1) - cov = 0.0 - mean = 0.0 - for i in x: - xwhite = whitening(i) - xwhite = xwhite.view(1, -1).numpy() - cov += np.dot(xwhite, xwhite.T) / num_features - mean += np.sum(xwhite) / num_features - # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov - torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, - msg="cov not close to 1") - torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, - msg="mean not close to 0") - - # Checking if LinearTransformation can be printed as string - whitening.__repr__() - def test_affine(self): input_img = np.zeros((40, 40, 3), dtype=np.uint8) cnt = [20, 20] @@ -1279,16 +1049,6 @@ def test_random_affine(self): t = transforms.RandomAffine(10, interpolation=2) self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR) - def test_autoaugment(self): - for policy in transforms.AutoAugmentPolicy: - for fill in [None, 85, (128, 128, 128)]: - random.seed(42) - img = Image.open(GRACE_HOPPER) - transform = transforms.AutoAugment(policy=policy, fill=fill) - for _ in range(100): - img = transform(img) - transform.__repr__() - @unittest.skipIf(stats is None, 'scipy.stats not available') def test_random_erasing(self): img = torch.ones(3, 128, 128) @@ -1997,5 +1757,253 @@ def test_random_order(): random_order_transform.__repr__() +def test_linear_transformation(): + num_samples = 1000 + x = torch.randn(num_samples, 3, 10, 10) + flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3)) + # compute principal components + sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0) + u, s, _ = np.linalg.svd(sigma.numpy()) + zca_epsilon = 1e-10 # avoid division by 0 + d = torch.Tensor(np.diag(1. 
/ np.sqrt(s + zca_epsilon))) + u = torch.Tensor(u) + principal_components = torch.mm(torch.mm(u, d), u.t()) + mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0)) + # initialize whitening matrix + whitening = transforms.LinearTransformation(principal_components, mean_vector) + # estimate covariance and mean using weak law of large number + num_features = flat_x.size(1) + cov = 0.0 + mean = 0.0 + for i in x: + xwhite = whitening(i) + xwhite = xwhite.view(1, -1).numpy() + cov += np.dot(xwhite, xwhite.T) / num_features + mean += np.sum(xwhite) / num_features + # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov + torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, + msg="cov not close to 1") + torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, + msg="mean not close to 0") + + # Checking if LinearTransformation can be printed as string + whitening.__repr__() + + +def test_max_value(): + for dtype in int_dtypes(): + assert F_t._max_value(dtype) == torch.iinfo(dtype).max + + # remove float testing as it can lead to errors such as + # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' + # for dtype in float_dtypes(): + # self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max) + + +@pytest.mark.parametrize('should_vflip', [True, False]) +@pytest.mark.parametrize('single_dim', [True, False]) +def test_ten_crop(should_vflip, single_dim): + to_pil_image = transforms.ToPILImage() + h = random.randint(5, 25) + w = random.randint(5, 25) + crop_h = random.randint(1, h) + crop_w = random.randint(1, w) + if single_dim: + crop_h = min(crop_h, crop_w) + crop_w = crop_h + transform = transforms.TenCrop(crop_h, + vertical_flip=should_vflip) + five_crop = transforms.FiveCrop(crop_h) + else: + transform = transforms.TenCrop((crop_h, crop_w), + vertical_flip=should_vflip) + five_crop = transforms.FiveCrop((crop_h, crop_w)) + + img = to_pil_image(torch.FloatTensor(3, h, w).uniform_()) + results = transform(img) + expected_output = five_crop(img) + + # Checking if FiveCrop and TenCrop can be printed as string + transform.__repr__() + five_crop.__repr__() + + if should_vflip: + vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM) + expected_output += five_crop(vflipped_img) + else: + hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT) + expected_output += five_crop(hflipped_img) + + assert len(results) == 10 + assert results == expected_output + + +def test_five_crop(): + to_pil_image = transforms.ToPILImage() + h = random.randint(5, 25) + w = random.randint(5, 25) + for single_dim in [True, False]: + crop_h = random.randint(1, h) + crop_w = random.randint(1, w) + if single_dim: + crop_h = min(crop_h, crop_w) + crop_w = crop_h + transform = transforms.FiveCrop(crop_h) + else: + transform = transforms.FiveCrop((crop_h, crop_w)) + + img = torch.FloatTensor(3, h, w).uniform_() + results = transform(to_pil_image(img)) + + assert len(results) == 5 + for crop in results: + assert crop.size == (crop_w, crop_h) + + to_pil_image = transforms.ToPILImage() + tl = to_pil_image(img[:, 0:crop_h, 0:crop_w]) + tr = to_pil_image(img[:, 0:crop_h, w - crop_w:]) + bl = to_pil_image(img[:, h - crop_h:, 0:crop_w]) + br = to_pil_image(img[:, h - crop_h:, w - crop_w:]) + center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img)) + expected_output = (tl, tr, bl, br, center) + assert results == expected_output + + +@pytest.mark.parametrize('policy', transforms.AutoAugmentPolicy) 
+@pytest.mark.parametrize('fill', [None, 85, (128, 128, 128)]) +def test_autoaugment(policy, fill): + random.seed(42) + img = Image.open(GRACE_HOPPER) + transform = transforms.AutoAugment(policy=policy, fill=fill) + for _ in range(100): + img = transform(img) + transform.__repr__() + + +def test_center_crop(): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + + img = torch.ones(3, height, width) + oh1 = (height - oheight) // 2 + ow1 = (width - owidth) // 2 + imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth] + imgnarrow.fill_(0) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + assertEqual(result.sum(), 0, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum1 = result.sum() + assertGreater(sum1, 1, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum2 = result.sum() + assertGreater(sum2, 0, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + assertGreater(sum2, sum1, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + + +def test_center_crop_2(): + """ Tests when center crop size is larger than image size, along any dimension""" + even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) + odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1) + + # Since height is independent of width, we can ignore images with odd height and even width and vice-versa. + input_image_sizes = [even_image_size, odd_image_size] + + # Get different crop sizes + delta = random.choice((1, 3, 5)) + crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta] + crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta) + + for (input_image_size, delta_height, delta_width) in crop_size_params: + img = torch.ones(3, *input_image_size) + crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width) + + # Test both transforms, one with PIL input and one with tensor + output_pil = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop(crop_size), + transforms.ToTensor()], + )(img) + assertEqual(output_pil.size()[1:3], crop_size, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + + output_tensor = transforms.CenterCrop(crop_size)(img) + assertEqual(output_tensor.size()[1:3], crop_size, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + + # Ensure output for PIL and Tensor are equal + assert_equal( + output_tensor, output_pil, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + ) + + # Check if content in center of both image and cropped output is same. 
+ center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) + crop_center_tl, input_center_tl = [0, 0], [0, 0] + for index in range(2): + if crop_size[index] > input_image_size[index]: + crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 + else: + input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 + + output_center = output_pil[ + :, + crop_center_tl[0]:crop_center_tl[0] + center_size[0], + crop_center_tl[1]:crop_center_tl[1] + center_size[1] + ] + + img_center = img[ + :, + input_center_tl[0]:input_center_tl[0] + center_size[0], + input_center_tl[1]:input_center_tl[1] + center_size[1] + ] + + assert_equal( + output_center, img_center, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + ) + + +def test_color_jitter(): + color_jitter = transforms.ColorJitter(2, 2, 2, 0.1) + + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + + for i in range(10): + y_pil = color_jitter(x_pil) + assert y_pil.mode == x_pil.mode + + y_pil_2 = color_jitter(x_pil_2) + assert y_pil_2.mode == x_pil_2.mode + + # Checking if ColorJitter can be printed as string + color_jitter.__repr__() + + if __name__ == '__main__': unittest.main() From 257f936ddd79baca39287a6fbec6f6f405c3e814 Mon Sep 17 00:00:00 2001 From: harish Date: Mon, 7 Jun 2021 16:37:34 +0530 Subject: [PATCH 02/12] modifications for assertEqual and assertGreater --- test/test_transforms.py | 82 +++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 45 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index d53cb3bf803..b425a74efdf 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1780,14 +1780,14 @@ def test_linear_transformation(): xwhite = xwhite.view(1, -1).numpy() cov += np.dot(xwhite, xwhite.T) / num_features mean += np.sum(xwhite) / num_features - # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov - torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, - msg="cov not close to 1") - torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, - msg="mean not close to 0") + # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov + torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, + msg="cov not close to 1") + torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, + msg="mean not close to 0") - # Checking if LinearTransformation can be printed as string - whitening.__repr__() + # Checking if LinearTransformation can be printed as string + whitening.__repr__() def test_max_value(): @@ -1819,13 +1819,13 @@ def test_ten_crop(should_vflip, single_dim): vertical_flip=should_vflip) five_crop = transforms.FiveCrop((crop_h, crop_w)) - img = to_pil_image(torch.FloatTensor(3, h, w).uniform_()) - results = transform(img) - expected_output = five_crop(img) + img = to_pil_image(torch.FloatTensor(3, h, w).uniform_()) + results = transform(img) + expected_output = five_crop(img) - # Checking if FiveCrop and TenCrop can be printed as string - transform.__repr__() - five_crop.__repr__() + # Checking if FiveCrop and TenCrop can be printed as string + transform.__repr__() + five_crop.__repr__() if should_vflip: vflipped_img = 
img.transpose(Image.FLIP_TOP_BOTTOM) @@ -1834,8 +1834,8 @@ def test_ten_crop(should_vflip, single_dim): hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT) expected_output += five_crop(hflipped_img) - assert len(results) == 10 - assert results == expected_output + assert len(results) == 10 + assert results == expected_output def test_five_crop(): @@ -1853,6 +1853,7 @@ def test_five_crop(): transform = transforms.FiveCrop((crop_h, crop_w)) img = torch.FloatTensor(3, h, w).uniform_() + results = transform(to_pil_image(img)) assert len(results) == 5 @@ -1896,8 +1897,7 @@ def test_center_crop(): transforms.CenterCrop((oheight, owidth)), transforms.ToTensor(), ])(img) - assertEqual(result.sum(), 0, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + assert result.sum() == 0 oheight += 1 owidth += 1 result = transforms.Compose([ @@ -1906,8 +1906,7 @@ def test_center_crop(): transforms.ToTensor(), ])(img) sum1 = result.sum() - assertGreater(sum1, 1, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + assert sum1 > 1 oheight += 1 owidth += 1 result = transforms.Compose([ @@ -1916,13 +1915,12 @@ def test_center_crop(): transforms.ToTensor(), ])(img) sum2 = result.sum() - assertGreater(sum2, 0, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) - assertGreater(sum2, sum1, - "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + assert sum2 > 0 + assert sum2 > sum1 -def test_center_crop_2(): +@pytest.mark.parametrize('index', range(2)) +def test_center_crop_2(index): """ Tests when center crop size is larger than image size, along any dimension""" even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1) @@ -1945,12 +1943,10 @@ def test_center_crop_2(): transforms.CenterCrop(crop_size), transforms.ToTensor()], )(img) - assertEqual(output_pil.size()[1:3], crop_size, - "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + assert output_pil.size()[1:3] == crop_size output_tensor = transforms.CenterCrop(crop_size)(img) - assertEqual(output_tensor.size()[1:3], crop_size, - "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + assert output_tensor.size()[1:3] == crop_size # Ensure output for PIL and Tensor are equal assert_equal( @@ -1961,11 +1957,10 @@ def test_center_crop_2(): # Check if content in center of both image and cropped output is same. 
         center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1]))
         crop_center_tl, input_center_tl = [0, 0], [0, 0]
-        for index in range(2):
-            if crop_size[index] > input_image_size[index]:
-                crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2
-            else:
-                input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2
+        if crop_size[index] > input_image_size[index]:
+            crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2
+        else:
+            input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2
 
         output_center = output_pil[
             :,
             crop_center_tl[0]:crop_center_tl[0] + center_size[0],
             crop_center_tl[1]:crop_center_tl[1] + center_size[1]
         ]
 
         img_center = img[
             :,
             input_center_tl[0]:input_center_tl[0] + center_size[0],
             input_center_tl[1]:input_center_tl[1] + center_size[1]
         ]
 
-        assert_equal(
-            output_center, img_center, check_stride=False,
-            msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
-        )
+        assert_equal(output_center, img_center, check_stride=False)
 
 
-def test_color_jitter():
+@pytest.mark.parametrize('i', range(10))
+def test_color_jitter(i):
     color_jitter = transforms.ColorJitter(2, 2, 2, 0.1)
 
     x_shape = [2, 2, 3]
@@ -1994,15 +1987,14 @@ def test_color_jitter():
     x_pil = Image.fromarray(x_np, mode='RGB')
     x_pil_2 = x_pil.convert('L')
 
-    for i in range(10):
-        y_pil = color_jitter(x_pil)
-        assert y_pil.mode == x_pil.mode
+    y_pil = color_jitter(x_pil)
+    assert y_pil.mode == x_pil.mode
 
-        y_pil_2 = color_jitter(x_pil_2)
-        assert y_pil_2.mode == x_pil_2.mode
+    y_pil_2 = color_jitter(x_pil_2)
+    assert y_pil_2.mode == x_pil_2.mode
 
-        # Checking if ColorJitter can be printed as string
-        color_jitter.__repr__()
+    # Checking if ColorJitter can be printed as string
+    color_jitter.__repr__()
 
 
 if __name__ == '__main__':

From dadd67b584de5edec91a6735b4156b76abbd199e Mon Sep 17 00:00:00 2001
From: harish
Date: Mon, 7 Jun 2021 16:52:08 +0530
Subject: [PATCH 03/12] modifications done for parametrize issues

---
 test/test_transforms.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index b425a74efdf..9e7b379a551 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1919,8 +1919,7 @@ def test_center_crop():
     assert sum2 > sum1
 
 
-@pytest.mark.parametrize('index', range(2))
-def test_center_crop_2(index):
+def test_center_crop_2():
     """ Tests when center crop size is larger than image size, along any dimension"""
     even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2)
     odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1)
@@ -1957,10 +1956,11 @@ def test_center_crop_2(index):
     # Check if content in center of both image and cropped output is same. 
center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) crop_center_tl, input_center_tl = [0, 0], [0, 0] - if crop_size[index] > input_image_size[index]: - crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 - else: - input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 + for index in range(2): + if crop_size[index] > input_image_size[index]: + crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 + else: + input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 output_center = output_pil[ :, From c1f35e09421ef88b6e6df716bd1693f38a14c1e5 Mon Sep 17 00:00:00 2001 From: harish Date: Mon, 7 Jun 2021 22:37:14 +0530 Subject: [PATCH 04/12] parametrizes issue resolved --- test/test_transforms.py | 56 ++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 9e7b379a551..c7bc154940b 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1790,14 +1790,14 @@ def test_linear_transformation(): whitening.__repr__() +@pytest.mark.parametrize(dtype, int_dtypes) def test_max_value(): - for dtype in int_dtypes(): - assert F_t._max_value(dtype) == torch.iinfo(dtype).max - # remove float testing as it can lead to errors such as - # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' - # for dtype in float_dtypes(): - # self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max) + assert F_t._max_value(dtype) == torch.iinfo(dtype).max + # remove float testing as it can lead to errors such as + # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' + # for dtype in float_dtypes(): + # self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max) @pytest.mark.parametrize('should_vflip', [True, False]) @@ -1838,36 +1838,36 @@ def test_ten_crop(should_vflip, single_dim): assert results == expected_output +@pytest.mark.parametrize('single_dim', [True, False]) def test_five_crop(): to_pil_image = transforms.ToPILImage() h = random.randint(5, 25) w = random.randint(5, 25) - for single_dim in [True, False]: - crop_h = random.randint(1, h) - crop_w = random.randint(1, w) - if single_dim: - crop_h = min(crop_h, crop_w) - crop_w = crop_h - transform = transforms.FiveCrop(crop_h) - else: - transform = transforms.FiveCrop((crop_h, crop_w)) + crop_h = random.randint(1, h) + crop_w = random.randint(1, w) + if single_dim: + crop_h = min(crop_h, crop_w) + crop_w = crop_h + transform = transforms.FiveCrop(crop_h) + else: + transform = transforms.FiveCrop((crop_h, crop_w)) - img = torch.FloatTensor(3, h, w).uniform_() + img = torch.FloatTensor(3, h, w).uniform_() - results = transform(to_pil_image(img)) + results = transform(to_pil_image(img)) - assert len(results) == 5 - for crop in results: - assert crop.size == (crop_w, crop_h) + assert len(results) == 5 + for crop in results: + assert crop.size == (crop_w, crop_h) - to_pil_image = transforms.ToPILImage() - tl = to_pil_image(img[:, 0:crop_h, 0:crop_w]) - tr = to_pil_image(img[:, 0:crop_h, w - crop_w:]) - bl = to_pil_image(img[:, h - crop_h:, 0:crop_w]) - br = to_pil_image(img[:, h - crop_h:, w - crop_w:]) - center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img)) - expected_output = (tl, tr, bl, br, center) - assert results == expected_output + to_pil_image = transforms.ToPILImage() + tl = to_pil_image(img[:, 0:crop_h, 0:crop_w]) + tr = 
to_pil_image(img[:, 0:crop_h, w - crop_w:])
+    bl = to_pil_image(img[:, h - crop_h:, 0:crop_w])
+    br = to_pil_image(img[:, h - crop_h:, w - crop_w:])
+    center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img))
+    expected_output = (tl, tr, bl, br, center)
+    assert results == expected_output
 
 
 @pytest.mark.parametrize('policy', transforms.AutoAugmentPolicy)

From 2e6bcaae42f28ac7f0dba9e1adac7cccac58d086 Mon Sep 17 00:00:00 2001
From: harish
Date: Mon, 7 Jun 2021 22:54:59 +0530
Subject: [PATCH 05/12] fixed dtype issue

---
 test/test_transforms.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index c7bc154940b..87784be673f 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1790,8 +1790,8 @@ def test_linear_transformation():
     whitening.__repr__()
 
 
-@pytest.mark.parametrize(dtype, int_dtypes)
-def test_max_value():
+@pytest.mark.parametrize('dtype', 'int_dtypes')
+def test_max_value(dtype, int_dtypes):
 
     assert F_t._max_value(dtype) == torch.iinfo(dtype).max
 

From 5c763a2c657306c12496eb5ea3a08638284090de Mon Sep 17 00:00:00 2001
From: harish
Date: Mon, 7 Jun 2021 23:25:50 +0530
Subject: [PATCH 06/12] modification for parametrize issue

---
 test/test_transforms.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 87784be673f..14c50f40a71 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1790,7 +1790,7 @@ def test_linear_transformation():
     whitening.__repr__()
 
 
-@pytest.mark.parametrize('dtype', 'int_dtypes')
+@pytest.mark.parametrize('dtype', int_dtypes())
 def test_max_value(dtype, int_dtypes):
 
     assert F_t._max_value(dtype) == torch.iinfo(dtype).max

From 435a3dec6541e5118294d79be6551fea417c3104 Mon Sep 17 00:00:00 2001
From: harish
Date: Mon, 7 Jun 2021 23:31:02 +0530
Subject: [PATCH 07/12] resolved parametrize issue in test_five_crop

---
 test/test_transforms.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 14c50f40a71..d5f72689be4 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1839,7 +1839,7 @@ def test_ten_crop(should_vflip, single_dim):
 
 
 @pytest.mark.parametrize('single_dim', [True, False])
-def test_five_crop():
+def test_five_crop(single_dim):
     to_pil_image = transforms.ToPILImage()
     h = random.randint(5, 25)
     w = random.randint(5, 25)

From 2c3511c56f39d51e5524bb1a55bc0512a875b8ff Mon Sep 17 00:00:00 2001
From: harish
Date: Mon, 7 Jun 2021 23:42:40 +0530
Subject: [PATCH 08/12] resolved parametrize issue in test_max_value

---
 test/test_transforms.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index d5f72689be4..2332a2d1bbb 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1791,7 +1791,7 @@ def test_linear_transformation():
 
 
 @pytest.mark.parametrize('dtype', int_dtypes())
-def test_max_value(dtype, int_dtypes):
+def test_max_value(dtype):
 
     assert F_t._max_value(dtype) == torch.iinfo(dtype).max

From 807c2c8bea362d0cd6d7954ddd370a528d02d22a Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Tue, 8 Jun 2021 09:17:13 +0100
Subject: [PATCH 09/12] parametrized test_center_crop_2

---
 test/test_transforms.py | 108 ++++++++++++++++++++--------------------
 1 file changed, 55 insertions(+), 53 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 
2332a2d1bbb..1267bfb5f8a 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1878,7 +1878,7 @@ def test_autoaugment(policy, fill): transform = transforms.AutoAugment(policy=policy, fill=fill) for _ in range(100): img = transform(img) - transform.__repr__() + transform.__repr__() def test_center_crop(): @@ -1919,66 +1919,67 @@ def test_center_crop(): assert sum2 > sum1 -def test_center_crop_2(): +delta = 1 +@pytest.mark.parametrize('odd_image_size', (True, False)) +@pytest.mark.parametrize('delta', (1, 3, 5)) +@pytest.mark.parametrize('delta_width', (-2, -1, 0, 1, 2)) +@pytest.mark.parametrize('delta_height', (-2, -1, 0, 1, 2)) +def test_center_crop_2(odd_image_size, delta, delta_width, delta_height): """ Tests when center crop size is larger than image size, along any dimension""" - even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) - odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1) # Since height is independent of width, we can ignore images with odd height and even width and vice-versa. - input_image_sizes = [even_image_size, odd_image_size] - - # Get different crop sizes - delta = random.choice((1, 3, 5)) - crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta] - crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta) + input_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) + if odd_image_size: + input_image_size = (input_image_size[0] + 1, input_image_size[1] + 1) + + delta_height *= delta + delta_width *= delta - for (input_image_size, delta_height, delta_width) in crop_size_params: - img = torch.ones(3, *input_image_size) - crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width) + img = torch.ones(3, *input_image_size) + crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width) # Test both transforms, one with PIL input and one with tensor - output_pil = transforms.Compose([ - transforms.ToPILImage(), - transforms.CenterCrop(crop_size), - transforms.ToTensor()], - )(img) - assert output_pil.size()[1:3] == crop_size - - output_tensor = transforms.CenterCrop(crop_size)(img) - assert output_tensor.size()[1:3] == crop_size - - # Ensure output for PIL and Tensor are equal - assert_equal( - output_tensor, output_pil, check_stride=False, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) - ) + output_pil = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop(crop_size), + transforms.ToTensor()], + )(img) + assert output_pil.size()[1:3] == crop_size + + output_tensor = transforms.CenterCrop(crop_size)(img) + assert output_tensor.size()[1:3] == crop_size + + # Ensure output for PIL and Tensor are equal + assert_equal( + output_tensor, output_pil, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + ) - # Check if content in center of both image and cropped output is same. - center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) - crop_center_tl, input_center_tl = [0, 0], [0, 0] - for index in range(2): - if crop_size[index] > input_image_size[index]: - crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 - else: - input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 + # Check if content in center of both image and cropped output is same. 
+ center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) + crop_center_tl, input_center_tl = [0, 0], [0, 0] + for index in range(2): + if crop_size[index] > input_image_size[index]: + crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 + else: + input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 - output_center = output_pil[ - :, - crop_center_tl[0]:crop_center_tl[0] + center_size[0], - crop_center_tl[1]:crop_center_tl[1] + center_size[1] - ] + output_center = output_pil[ + :, + crop_center_tl[0]:crop_center_tl[0] + center_size[0], + crop_center_tl[1]:crop_center_tl[1] + center_size[1] + ] - img_center = img[ - :, - input_center_tl[0]:input_center_tl[0] + center_size[0], - input_center_tl[1]:input_center_tl[1] + center_size[1] - ] + img_center = img[ + :, + input_center_tl[0]:input_center_tl[0] + center_size[0], + input_center_tl[1]:input_center_tl[1] + center_size[1] + ] - assert_equal(output_center, img_center, check_stride=False) + assert_equal(output_center, img_center, check_stride=False) -@pytest.mark.parametrize('i', range(10)) -def test_color_jitter(i): +def test_color_jitter(): color_jitter = transforms.ColorJitter(2, 2, 2, 0.1) x_shape = [2, 2, 3] @@ -1987,11 +1988,12 @@ def test_color_jitter(i): x_pil = Image.fromarray(x_np, mode='RGB') x_pil_2 = x_pil.convert('L') - y_pil = color_jitter(x_pil) - assert y_pil.mode == x_pil.mode + for _ in range(10): + y_pil = color_jitter(x_pil) + assert y_pil.mode == x_pil.mode - y_pil_2 = color_jitter(x_pil_2) - assert y_pil_2.mode == x_pil_2.mode + y_pil_2 = color_jitter(x_pil_2) + assert y_pil_2.mode == x_pil_2.mode # Checking if ColorJitter can be printed as string color_jitter.__repr__() From 880317f74e12376ef5e655f46055389813cb19a1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 8 Jun 2021 09:55:34 +0100 Subject: [PATCH 10/12] put back randomcrop --- test/test_transforms.py | 44 ++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index cb79ae5792a..146ccf520c4 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1688,37 +1688,45 @@ def test_random_crop(): width = random.randint(10, 32) * 2 oheight = random.randint(5, (height - 2) / 2) * 2 owidth = random.randint(5, (width - 2) / 2) * 2 - img = torch.ones(3, height, width) - oh1 = (height - oheight) // 2 - ow1 = (width - owidth) // 2 - imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth] - imgnarrow.fill_(0) result = transforms.Compose([ transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), + transforms.RandomCrop((oheight, owidth)), transforms.ToTensor(), ])(img) - assert result.sum() == 0 - oheight += 1 - owidth += 1 + assert result.size(1) == oheight + assert result.size(2) == owidth + + padding = random.randint(1, 20) result = transforms.Compose([ transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), + transforms.RandomCrop((oheight, owidth), padding=padding), transforms.ToTensor(), ])(img) - sum1 = result.sum() - assert sum1 > 1 - oheight += 1 - owidth += 1 + assert result.size(1) == oheight + assert result.size(2) == owidth + result = transforms.Compose([ transforms.ToPILImage(), - transforms.CenterCrop((oheight, owidth)), + transforms.RandomCrop((height, width)), + transforms.ToTensor() + ])(img) + assert result.size(1) == height + assert result.size(2) == width + torch.testing.assert_close(result, img) + + result = transforms.Compose([ + 
transforms.ToPILImage(), + transforms.RandomCrop((height + 1, width + 1), pad_if_needed=True), transforms.ToTensor(), ])(img) - sum2 = result.sum() - assert sum2 > 0 - assert sum2 > sum1 + assert result.size(1) == height + 1 + assert result.size(2) == width + 1 + + t = transforms.RandomCrop(48) + img = torch.ones(3, 32, 32) + with pytest.raises(ValueError, match=r"Required crop size .+ is larger then input image size .+"): + t(img) @pytest.mark.parametrize('odd_image_size', (True, False)) From 32f65450443783f1452477f15688ff68d9c3bd9b Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 8 Jun 2021 09:57:55 +0100 Subject: [PATCH 11/12] put back test_center_crop --- test/test_transforms.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/test/test_transforms.py b/test/test_transforms.py index 146ccf520c4..119537dfecb 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1729,6 +1729,44 @@ def test_random_crop(): t(img) +def test_center_crop(): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + + img = torch.ones(3, height, width) + oh1 = (height - oheight) // 2 + ow1 = (width - owidth) // 2 + imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth] + imgnarrow.fill_(0) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + assert result.sum() == 0 + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum1 = result.sum() + assert sum1 > 1 + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum2 = result.sum() + assert sum2 > 0 + assert sum2 > sum1 + + @pytest.mark.parametrize('odd_image_size', (True, False)) @pytest.mark.parametrize('delta', (1, 3, 5)) @pytest.mark.parametrize('delta_width', (-2, -1, 0, 1, 2)) From b40d26c577c331dc87df40dcd53d0153d3b07973 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 8 Jun 2021 10:00:04 +0100 Subject: [PATCH 12/12] pep8 --- test/test_transforms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 119537dfecb..188ba40668d 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1778,7 +1778,7 @@ def test_center_crop_2(odd_image_size, delta, delta_width, delta_height): input_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) if odd_image_size: input_image_size = (input_image_size[0] + 1, input_image_size[1] + 1) - + delta_height *= delta delta_width *= delta
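
The pattern running through this series: each unittest.TestCase method becomes a module-level function, self.assertEqual/assertGreater calls become plain assert statements, and loops over test cases are lifted into @pytest.mark.parametrize decorators so pytest collects each case as a separate test. A minimal sketch of that conversion (hypothetical example, not taken from the diffs above):

    # before: one unittest method, all cases hidden inside a loop
    import unittest

    class Tester(unittest.TestCase):
        def test_double(self):
            for n in [1, 2, 3]:
                self.assertEqual(2 * n, n + n)

    # after: a module-level pytest function; each value of n is
    # collected and reported as its own test
    import pytest

    @pytest.mark.parametrize('n', [1, 2, 3])
    def test_double(n):
        assert 2 * n == n + n

As patches 04-08 illustrate, parametrize takes the argument name as a string ('dtype') but the argument values as a real iterable (int_dtypes()), and the test function's signature must accept exactly the parametrized names.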