From 701855b7773b338efa3315a58611a28813f21958 Mon Sep 17 00:00:00 2001
From: zhiqiang
Date: Fri, 4 Jun 2021 02:08:18 +0800
Subject: [PATCH 01/10] Resolve conflicts

---
 test/test_transforms.py | 503 +++++++++++++++++++++-------------------
 1 file changed, 258 insertions(+), 245 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 33d81c657da..1584051966b 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -768,251 +768,6 @@ def test_accimage_crop(self):
         self.assertEqual(expected_output.size(), output.size())
         torch.testing.assert_close(output, expected_output)
 
-    def test_1_channel_tensor_to_pil_image(self):
-        to_tensor = transforms.ToTensor()
-
-        img_data_float = torch.Tensor(1, 4, 4).uniform_()
-        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
-        img_data_short = torch.ShortTensor(1, 4, 4).random_()
-        img_data_int = torch.IntTensor(1, 4, 4).random_()
-
-        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
-        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
-                            img_data_byte.float().div(255.0).numpy(),
-                            img_data_short.numpy(),
-                            img_data_int.numpy()]
-        expected_modes = ['L', 'L', 'I;16', 'I']
-
-        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
-            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
-                img = transform(img_data)
-                self.assertEqual(img.mode, mode)
-                torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False)
-        # 'F' mode for torch.FloatTensor
-        img_F_mode = transforms.ToPILImage(mode='F')(img_data_float)
-        self.assertEqual(img_F_mode.mode, 'F')
-        torch.testing.assert_close(
-            np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode)
-        )
-
-    def test_1_channel_ndarray_to_pil_image(self):
-        img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
-        img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy()
-        img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy()
-        img_data_int = torch.IntTensor(4, 4, 1).random_().numpy()
-
-        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
-        expected_modes = ['F', 'L', 'I;16', 'I']
-        for img_data, mode in zip(inputs, expected_modes):
-            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
-                img = transform(img_data)
-                self.assertEqual(img.mode, mode)
-                # note: we explicitly convert img's dtype because pytorch doesn't support uint16
-                # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array
-                torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype))
-
-    def test_2_channel_ndarray_to_pil_image(self):
-        def verify_img_data(img_data, mode):
-            if mode is None:
-                img = transforms.ToPILImage()(img_data)
-                self.assertEqual(img.mode, 'LA')  # default should assume LA
-            else:
-                img = transforms.ToPILImage(mode=mode)(img_data)
-                self.assertEqual(img.mode, mode)
-            split = img.split()
-            for i in range(2):
-                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
-
-        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
-        for mode in [None, 'LA']:
-            verify_img_data(img_data, mode)
-
-        transforms.ToPILImage().__repr__()
-
-        with self.assertRaises(ValueError):
-            # should raise if we try a mode for 4 or 1 or 3 channel images
-            transforms.ToPILImage(mode='RGBA')(img_data)
-            transforms.ToPILImage(mode='P')(img_data)
-            transforms.ToPILImage(mode='RGB')(img_data)
-
-    def 
test_2_channel_tensor_to_pil_image(self): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - self.assertEqual(img.mode, 'LA') # default should assume LA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - self.assertEqual(img.mode, mode) - split = img.split() - for i in range(2): - self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) - - img_data = torch.Tensor(2, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'LA']: - verify_img_data(img_data, expected_output, mode=mode) - - with self.assertRaises(ValueError): - # should raise if we try a mode for 4 or 1 or 3 channel images - transforms.ToPILImage(mode='RGBA')(img_data) - transforms.ToPILImage(mode='P')(img_data) - transforms.ToPILImage(mode='RGB')(img_data) - - def test_3_channel_tensor_to_pil_image(self): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - self.assertEqual(img.mode, 'RGB') # default should assume RGB - else: - img = transforms.ToPILImage(mode=mode)(img_data) - self.assertEqual(img.mode, mode) - split = img.split() - for i in range(3): - self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) - - img_data = torch.Tensor(3, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'RGB', 'HSV', 'YCbCr']: - verify_img_data(img_data, expected_output, mode=mode) - - with self.assertRaises(ValueError): - # should raise if we try a mode for 4 or 1 or 2 channel images - transforms.ToPILImage(mode='RGBA')(img_data) - transforms.ToPILImage(mode='P')(img_data) - transforms.ToPILImage(mode='LA')(img_data) - - with self.assertRaises(ValueError): - transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) - - def test_3_channel_ndarray_to_pil_image(self): - def verify_img_data(img_data, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - self.assertEqual(img.mode, 'RGB') # default should assume RGB - else: - img = transforms.ToPILImage(mode=mode)(img_data) - self.assertEqual(img.mode, mode) - split = img.split() - for i in range(3): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) - - img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() - for mode in [None, 'RGB', 'HSV', 'YCbCr']: - verify_img_data(img_data, mode) - - # Checking if ToPILImage can be printed as string - transforms.ToPILImage().__repr__() - - with self.assertRaises(ValueError): - # should raise if we try a mode for 4 or 1 or 2 channel images - transforms.ToPILImage(mode='RGBA')(img_data) - transforms.ToPILImage(mode='P')(img_data) - transforms.ToPILImage(mode='LA')(img_data) - - def test_4_channel_tensor_to_pil_image(self): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - self.assertEqual(img.mode, 'RGBA') # default should assume RGBA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - self.assertEqual(img.mode, mode) - - split = img.split() - for i in range(4): - self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) - - img_data = torch.Tensor(4, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'RGBA', 'CMYK', 'RGBX']: - verify_img_data(img_data, expected_output, mode) - - with self.assertRaises(ValueError): - # should raise if we try a mode 
for 3 or 1 or 2 channel images - transforms.ToPILImage(mode='RGB')(img_data) - transforms.ToPILImage(mode='P')(img_data) - transforms.ToPILImage(mode='LA')(img_data) - - def test_4_channel_ndarray_to_pil_image(self): - def verify_img_data(img_data, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - self.assertEqual(img.mode, 'RGBA') # default should assume RGBA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - self.assertEqual(img.mode, mode) - split = img.split() - for i in range(4): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) - - img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() - for mode in [None, 'RGBA', 'CMYK', 'RGBX']: - verify_img_data(img_data, mode) - - with self.assertRaises(ValueError): - # should raise if we try a mode for 3 or 1 or 2 channel images - transforms.ToPILImage(mode='RGB')(img_data) - transforms.ToPILImage(mode='P')(img_data) - transforms.ToPILImage(mode='LA')(img_data) - - def test_2d_tensor_to_pil_image(self): - to_tensor = transforms.ToTensor() - - img_data_float = torch.Tensor(4, 4).uniform_() - img_data_byte = torch.ByteTensor(4, 4).random_(0, 255) - img_data_short = torch.ShortTensor(4, 4).random_() - img_data_int = torch.IntTensor(4, 4).random_() - - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), - img_data_byte.float().div(255.0).numpy(), - img_data_short.numpy(), - img_data_int.numpy()] - expected_modes = ['L', 'L', 'I;16', 'I'] - - for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - self.assertEqual(img.mode, mode) - np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) - - def test_2d_ndarray_to_pil_image(self): - img_data_float = torch.Tensor(4, 4).uniform_().numpy() - img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy() - img_data_short = torch.ShortTensor(4, 4).random_().numpy() - img_data_int = torch.IntTensor(4, 4).random_().numpy() - - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_modes = ['F', 'L', 'I;16', 'I'] - for img_data, mode in zip(inputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - self.assertEqual(img.mode, mode) - np.testing.assert_allclose(img_data, img) - - def test_tensor_bad_types_to_pil_image(self): - with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'): - transforms.ToPILImage()(torch.ones(1, 3, 4, 4)) - with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'): - transforms.ToPILImage()(torch.ones(6, 4, 4)) - - def test_ndarray_bad_types_to_pil_image(self): - trans = transforms.ToPILImage() - reg_msg = r'Input type \w+ is not supported' - with self.assertRaisesRegex(TypeError, reg_msg): - trans(np.ones([4, 4, 1], np.int64)) - with self.assertRaisesRegex(TypeError, reg_msg): - trans(np.ones([4, 4, 1], np.uint16)) - with self.assertRaisesRegex(TypeError, reg_msg): - trans(np.ones([4, 4, 1], np.uint32)) - with self.assertRaisesRegex(TypeError, reg_msg): - trans(np.ones([4, 4, 1], np.float64)) - - with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. 
Got \d+ dimensions.'): - transforms.ToPILImage()(np.ones([1, 4, 4, 3])) - with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'): - transforms.ToPILImage()(np.ones([4, 4, 6])) - @unittest.skipIf(stats is None, 'scipy.stats not available') def test_random_vertical_flip(self): random_state = random.getstate() @@ -1794,6 +1549,264 @@ def test_randomness(fn, trans, config, p): assert p_value > 0.0001 +def test_1_channel_tensor_to_pil_image(): + to_tensor = transforms.ToTensor() + + img_data_float = torch.Tensor(1, 4, 4).uniform_() + img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255) + img_data_short = torch.ShortTensor(1, 4, 4).random_() + img_data_int = torch.IntTensor(1, 4, 4).random_() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), + img_data_byte.float().div(255.0).numpy(), + img_data_short.numpy(), + img_data_int.numpy()] + expected_modes = ['L', 'L', 'I;16', 'I'] + + for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) + + # 'F' mode for torch.FloatTensor + img_F_mode = transforms.ToPILImage(mode='F')(img_data_float) + assert img_F_mode.mode == 'F' + torch.testing.assert_close( + np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode) + ) + + +def test_1_channel_ndarray_to_pil_image(): + img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy() + img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy() + img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy() + img_data_int = torch.IntTensor(4, 4, 1).random_().numpy() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_modes = ['F', 'L', 'I;16', 'I'] + for img_data, mode in zip(inputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + # note: we explicitly convert img's dtype because pytorch doesn't support uint16 + # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array + torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) + + +def test_2_channel_ndarray_to_pil_image(): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'LA' # default should assume LA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(2): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() + for mode in [None, 'LA']: + verify_img_data(img_data, mode) + + transforms.ToPILImage().__repr__() + + with pytest.raises(ValueError): + # should raise if we try a mode for 4 or 1 or 3 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='RGB')(img_data) + + +def test_2_channel_tensor_to_pil_image(): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'LA' # default should assume LA + else: + img = 
transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(2): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + + img_data = torch.Tensor(2, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 'LA']: + verify_img_data(img_data, expected_output, mode=mode) + + with pytest.raises(ValueError): + # should raise if we try a mode for 4 or 1 or 3 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='RGB')(img_data) + + +def test_2d_tensor_to_pil_image(): + to_tensor = transforms.ToTensor() + + img_data_float = torch.Tensor(4, 4).uniform_() + img_data_byte = torch.ByteTensor(4, 4).random_(0, 255) + img_data_short = torch.ShortTensor(4, 4).random_() + img_data_int = torch.IntTensor(4, 4).random_() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), + img_data_byte.float().div(255.0).numpy(), + img_data_short.numpy(), + img_data_int.numpy()] + expected_modes = ['L', 'L', 'I;16', 'I'] + + for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) + + +def test_2d_ndarray_to_pil_image(): + img_data_float = torch.Tensor(4, 4).uniform_().numpy() + img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy() + img_data_short = torch.ShortTensor(4, 4).random_().numpy() + img_data_int = torch.IntTensor(4, 4).random_().numpy() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_modes = ['F', 'L', 'I;16', 'I'] + for img_data, mode in zip(inputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + np.testing.assert_allclose(img_data, img) + + +def test_3_channel_tensor_to_pil_image(): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGB' # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(3): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + + img_data = torch.Tensor(3, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 'RGB', 'HSV', 'YCbCr']: + verify_img_data(img_data, expected_output, mode=mode) + + with pytest.raises(ValueError): + # should raise if we try a mode for 4 or 1 or 2 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + with pytest.raises(ValueError): + transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) + + +def test_3_channel_ndarray_to_pil_image(): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGB' # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(3): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 
4, 3).random_(0, 255).numpy() + for mode in [None, 'RGB', 'HSV', 'YCbCr']: + verify_img_data(img_data, mode) + + # Checking if ToPILImage can be printed as string + transforms.ToPILImage().__repr__() + + with pytest.raises(ValueError): + # should raise if we try a mode for 4 or 1 or 2 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + +def test_4_channel_tensor_to_pil_image(): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGBA' # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + + split = img.split() + for i in range(4): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + + img_data = torch.Tensor(4, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 'RGBA', 'CMYK', 'RGBX']: + verify_img_data(img_data, expected_output, mode) + + with pytest.raises(ValueError): + # should raise if we try a mode for 3 or 1 or 2 channel images + transforms.ToPILImage(mode='RGB')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + +def test_4_channel_ndarray_to_pil_image(): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGBA' # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(4): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() + for mode in [None, 'RGBA', 'CMYK', 'RGBX']: + verify_img_data(img_data, mode) + + with pytest.raises(ValueError): + # should raise if we try a mode for 3 or 1 or 2 channel images + transforms.ToPILImage(mode='RGB')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + +def test_ndarray_bad_types_to_pil_image(): + trans = transforms.ToPILImage() + reg_msg = r'Input type \w+ is not supported' + with pytest.raises(TypeError, match=reg_msg): + trans(np.ones([4, 4, 1], np.int64)) + with pytest.raises(TypeError, match=reg_msg): + trans(np.ones([4, 4, 1], np.uint16)) + with pytest.raises(TypeError, match=reg_msg): + trans(np.ones([4, 4, 1], np.uint32)) + with pytest.raises(TypeError, match=reg_msg): + trans(np.ones([4, 4, 1], np.float64)) + + with pytest.raises(ValueError, match=r'pic should be 2/3 dimensional. Got \d+ dimensions.'): + transforms.ToPILImage()(np.ones([1, 4, 4, 3])) + with pytest.raises(ValueError, match=r'pic should not have > 4 channels. Got \d+ channels.'): + transforms.ToPILImage()(np.ones([4, 4, 6])) + + +def test_tensor_bad_types_to_pil_image(): + with pytest.raises(ValueError, match=r'pic should be 2/3 dimensional. Got \d+ dimensions.'): + transforms.ToPILImage()(torch.ones(1, 3, 4, 4)) + with pytest.raises(ValueError, match=r'pic should not have > 4 channels. 
Got \d+ channels.'):
+        transforms.ToPILImage()(torch.ones(6, 4, 4))
+
+
 def test_adjust_brightness():
     x_shape = [2, 2, 3]
     x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]

From a1fd34242926b5f50dcdcbb27622a6539bf40e36 Mon Sep 17 00:00:00 2001
From: zhiqiang
Date: Fri, 4 Jun 2021 00:36:26 +0800
Subject: [PATCH 02/10] Add missing error message for ValueError

---
 test/test_transforms.py | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 1584051966b..35389771fda 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1613,7 +1613,7 @@ def verify_img_data(img_data, mode):
 
     transforms.ToPILImage().__repr__()
 
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         # should raise if we try a mode for 4 or 1 or 3 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
         transforms.ToPILImage(mode='P')(img_data)
@@ -1637,7 +1637,7 @@ def verify_img_data(img_data, expected_output, mode):
     for mode in [None, 'LA']:
         verify_img_data(img_data, expected_output, mode=mode)
 
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         # should raise if we try a mode for 4 or 1 or 3 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
         transforms.ToPILImage(mode='P')(img_data)
@@ -1698,13 +1698,14 @@ def verify_img_data(img_data, expected_output, mode):
     for mode in [None, 'RGB', 'HSV', 'YCbCr']:
         verify_img_data(img_data, expected_output, mode=mode)
 
-    with pytest.raises(ValueError):
+    error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs"
+    with pytest.raises(ValueError, match=error_message_3d):
         # should raise if we try a mode for 4 or 1 or 2 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
         transforms.ToPILImage(mode='P')(img_data)
         transforms.ToPILImage(mode='LA')(img_data)
 
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError, match=r'pic should be 2/3 dimensional. 
Got \d+ dimensions.'): transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) @@ -1727,7 +1728,8 @@ def verify_img_data(img_data, mode): # Checking if ToPILImage can be printed as string transforms.ToPILImage().__repr__() - with pytest.raises(ValueError): + error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs" + with pytest.raises(ValueError, match=error_message_3d): # should raise if we try a mode for 4 or 1 or 2 channel images transforms.ToPILImage(mode='RGBA')(img_data) transforms.ToPILImage(mode='P')(img_data) @@ -1752,7 +1754,8 @@ def verify_img_data(img_data, expected_output, mode): for mode in [None, 'RGBA', 'CMYK', 'RGBX']: verify_img_data(img_data, expected_output, mode) - with pytest.raises(ValueError): + error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs" + with pytest.raises(ValueError, match=error_message_4d): # should raise if we try a mode for 3 or 1 or 2 channel images transforms.ToPILImage(mode='RGB')(img_data) transforms.ToPILImage(mode='P')(img_data) @@ -1775,7 +1778,8 @@ def verify_img_data(img_data, mode): for mode in [None, 'RGBA', 'CMYK', 'RGBX']: verify_img_data(img_data, mode) - with pytest.raises(ValueError): + error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs" + with pytest.raises(ValueError, match=error_message_4d): # should raise if we try a mode for 3 or 1 or 2 channel images transforms.ToPILImage(mode='RGB')(img_data) transforms.ToPILImage(mode='P')(img_data) From 53ac33eb98ee4852888e0a67e89c943b481ca08b Mon Sep 17 00:00:00 2001 From: zhiqiang Date: Fri, 4 Jun 2021 02:02:06 +0800 Subject: [PATCH 03/10] Use pytest.mark.parametrize to replace for-loops --- test/test_transforms.py | 279 +++++++++++++++++++++------------------- 1 file changed, 146 insertions(+), 133 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 35389771fda..fb9e7ef51ea 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1578,39 +1578,38 @@ def test_1_channel_tensor_to_pil_image(): ) -def test_1_channel_ndarray_to_pil_image(): - img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy() - img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy() - img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy() - img_data_int = torch.IntTensor(4, 4, 1).random_().numpy() +@pytest.mark.parametrize('img_data, mode', [ + (torch.Tensor(4, 4, 1).uniform_().numpy(), 'F'), + (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), 'L'), + (torch.ShortTensor(4, 4, 1).random_().numpy(), 'I;16'), + (torch.IntTensor(4, 4, 1).random_().numpy(), 'I'), +]) +def test_1_channel_ndarray_to_pil_image(img_data, mode): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + # note: we explicitly convert img's dtype because pytorch doesn't support uint16 + # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array + torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) + + +@pytest.mark.parametrize('mode', [None, 'LA']) +def test_2_channel_ndarray_to_pil_image(mode): + img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_modes = ['F', 'L', 'I;16', 'I'] - for img_data, mode in zip(inputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert 
img.mode == mode - # note: we explicitly convert img's dtype because pytorch doesn't support uint16 - # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array - torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) - - -def test_2_channel_ndarray_to_pil_image(): - def verify_img_data(img_data, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'LA' # default should assume LA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode - split = img.split() - for i in range(2): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'LA' # default should assume LA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(2): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) - img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() - for mode in [None, 'LA']: - verify_img_data(img_data, mode) +def test_2_channel_ndarray_to_pil_image_error(): + img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() transforms.ToPILImage().__repr__() with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"): @@ -1620,22 +1619,24 @@ def verify_img_data(img_data, mode): transforms.ToPILImage(mode='RGB')(img_data) -def test_2_channel_tensor_to_pil_image(): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'LA' # default should assume LA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode - split = img.split() - for i in range(2): - assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) - +@pytest.mark.parametrize('mode', [None, 'LA']) +def test_2_channel_tensor_to_pil_image(mode): img_data = torch.Tensor(2, 4, 4).uniform_() expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'LA']: - verify_img_data(img_data, expected_output, mode=mode) + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'LA' # default should assume LA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + + split = img.split() + for i in range(2): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + + +def test_2_channel_tensor_to_pil_image_error(): + img_data = torch.Tensor(2, 4, 4).uniform_() with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"): # should raise if we try a mode for 4 or 1 or 3 channel images @@ -1644,60 +1645,66 @@ def verify_img_data(img_data, expected_output, mode): transforms.ToPILImage(mode='RGB')(img_data) -def test_2d_tensor_to_pil_image(): +@pytest.mark.parametrize('data_type, mode', [ + ('float', 'L'), + ('byte', 'L'), + ('short', 'I;16'), + ('int', 'I'), +]) +def test_2d_tensor_to_pil_image(data_type, mode): to_tensor = transforms.ToTensor() + if data_type == 'float': + img_data = torch.Tensor(4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255).numpy() + elif data_type == 'byte': + img_data = torch.ByteTensor(4, 4).random_(0, 255) + expected_output = img_data.float().div(255.0).numpy() + elif data_type == 'short': + img_data = torch.ShortTensor(4, 4).random_() + expected_output = img_data.numpy() + elif data_type == 
'int': + img_data = torch.IntTensor(4, 4).random_() + expected_output = img_data.numpy() + else: + assert False + + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) + + +@pytest.mark.parametrize('img_data, mode', [ + (torch.Tensor(4, 4).uniform_().numpy(), 'F'), + (torch.ByteTensor(4, 4).random_(0, 255).numpy(), 'L'), + (torch.ShortTensor(4, 4).random_().numpy(), 'I;16'), + (torch.IntTensor(4, 4).random_().numpy(), 'I'), +]) +def test_2d_ndarray_to_pil_image(img_data, mode): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + np.testing.assert_allclose(img_data, img) + + +@pytest.mark.parametrize('mode', [None, 'RGB', 'HSV', 'YCbCr']) +def test_3_channel_tensor_to_pil_image(mode): + img_data = torch.Tensor(3, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) - img_data_float = torch.Tensor(4, 4).uniform_() - img_data_byte = torch.ByteTensor(4, 4).random_(0, 255) - img_data_short = torch.ShortTensor(4, 4).random_() - img_data_int = torch.IntTensor(4, 4).random_() - - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), - img_data_byte.float().div(255.0).numpy(), - img_data_short.numpy(), - img_data_int.numpy()] - expected_modes = ['L', 'L', 'I;16', 'I'] - - for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) - - -def test_2d_ndarray_to_pil_image(): - img_data_float = torch.Tensor(4, 4).uniform_().numpy() - img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy() - img_data_short = torch.ShortTensor(4, 4).random_().numpy() - img_data_int = torch.IntTensor(4, 4).random_().numpy() - - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_modes = ['F', 'L', 'I;16', 'I'] - for img_data, mode in zip(inputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - np.testing.assert_allclose(img_data, img) - + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGB' # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(3): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) -def test_3_channel_tensor_to_pil_image(): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'RGB' # default should assume RGB - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode - split = img.split() - for i in range(3): - assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) +def test_3_channel_tensor_to_pil_image_error(): img_data = torch.Tensor(3, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'RGB', 'HSV', 'YCbCr']: - verify_img_data(img_data, expected_output, mode=mode) - error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs" 
with pytest.raises(ValueError, match=error_message_3d): # should raise if we try a mode for 4 or 1 or 2 channel images @@ -1709,21 +1716,23 @@ def verify_img_data(img_data, expected_output, mode): transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) -def test_3_channel_ndarray_to_pil_image(): - def verify_img_data(img_data, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'RGB' # default should assume RGB - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode - split = img.split() - for i in range(3): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) +@pytest.mark.parametrize('mode', [None, 'RGB', 'HSV', 'YCbCr']) +def test_3_channel_ndarray_to_pil_image(mode): + img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() + + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGB' # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(3): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + +def test_3_channel_ndarray_to_pil_image_error(): img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() - for mode in [None, 'RGB', 'HSV', 'YCbCr']: - verify_img_data(img_data, mode) # Checking if ToPILImage can be printed as string transforms.ToPILImage().__repr__() @@ -1736,23 +1745,25 @@ def verify_img_data(img_data, mode): transforms.ToPILImage(mode='LA')(img_data) -def test_4_channel_tensor_to_pil_image(): - def verify_img_data(img_data, expected_output, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'RGBA' # default should assume RGBA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode +@pytest.mark.parametrize('mode', [None, 'RGBA', 'CMYK', 'RGBX']) +def test_4_channel_tensor_to_pil_image(mode): + img_data = torch.Tensor(4, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGBA' # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode - split = img.split() - for i in range(4): - assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + split = img.split() + for i in range(4): + assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + +def test_4_channel_tensor_to_pil_image_error(): img_data = torch.Tensor(4, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255) - for mode in [None, 'RGBA', 'CMYK', 'RGBX']: - verify_img_data(img_data, expected_output, mode) error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs" with pytest.raises(ValueError, match=error_message_4d): @@ -1762,21 +1773,23 @@ def verify_img_data(img_data, expected_output, mode): transforms.ToPILImage(mode='LA')(img_data) -def test_4_channel_ndarray_to_pil_image(): - def verify_img_data(img_data, mode): - if mode is None: - img = transforms.ToPILImage()(img_data) - assert img.mode == 'RGBA' # default should assume RGBA - else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode - split = img.split() - for i in range(4): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) +@pytest.mark.parametrize('mode', [None, 'RGBA', 'CMYK', 'RGBX']) +def 
test_4_channel_ndarray_to_pil_image(mode): + img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() + + if mode is None: + img = transforms.ToPILImage()(img_data) + assert img.mode == 'RGBA' # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + assert img.mode == mode + split = img.split() + for i in range(4): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + +def test_4_channel_ndarray_to_pil_image_error(): img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() - for mode in [None, 'RGBA', 'CMYK', 'RGBX']: - verify_img_data(img_data, mode) error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs" with pytest.raises(ValueError, match=error_message_4d): From c32a6d53e8701d7992db17161b6ff29746027ee2 Mon Sep 17 00:00:00 2001 From: zhiqiang Date: Fri, 4 Jun 2021 02:23:01 +0800 Subject: [PATCH 04/10] Using pytest.mark.parametrize to replace for-loops --- test/test_transforms.py | 47 +++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index fb9e7ef51ea..a296d47e50e 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1549,32 +1549,43 @@ def test_randomness(fn, trans, config, p): assert p_value > 0.0001 -def test_1_channel_tensor_to_pil_image(): +@pytest.mark.parametrize('data_type, mode', [ + ('float', 'L'), + ('byte', 'L'), + ('short', 'I;16'), + ('int', 'I'), +]) +def test_1_channel_tensor_to_pil_image(data_type, mode): to_tensor = transforms.ToTensor() - img_data_float = torch.Tensor(1, 4, 4).uniform_() - img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255) - img_data_short = torch.ShortTensor(1, 4, 4).random_() - img_data_int = torch.IntTensor(1, 4, 4).random_() + if data_type == 'float': + img_data = torch.Tensor(1, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255).numpy() + elif data_type == 'byte': + img_data = torch.ByteTensor(1, 4, 4).random_(0, 255) + expected_output = img_data.float().div(255.0).numpy() + elif data_type == 'short': + img_data = torch.ShortTensor(1, 4, 4).random_() + expected_output = img_data.numpy() + elif data_type == 'int': + img_data = torch.IntTensor(1, 4, 4).random_() + expected_output = img_data.numpy() + else: + assert False - inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] - expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), - img_data_byte.float().div(255.0).numpy(), - img_data_short.numpy(), - img_data_int.numpy()] - expected_modes = ['L', 'L', 'I;16', 'I'] + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + assert img.mode == mode + torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) - for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) +def test_1_channel_tensor_to_pil_image_error(): + img_data = torch.Tensor(1, 4, 4).uniform_() # 'F' mode for torch.FloatTensor - img_F_mode = transforms.ToPILImage(mode='F')(img_data_float) + img_F_mode = transforms.ToPILImage(mode='F')(img_data) assert img_F_mode.mode == 'F' torch.testing.assert_close( - 
np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode) + np.array(Image.fromarray(img_data.squeeze(0).numpy(), mode='F')), np.array(img_F_mode) ) From b1d0830ada89cfe9bbe8d50b1eda2e34df51fef2 Mon Sep 17 00:00:00 2001 From: zhiqwang Date: Sat, 5 Jun 2021 09:34:43 -0400 Subject: [PATCH 05/10] Parametrize over img_data and remove dependencies of np.all_close --- test/test_transforms.py | 123 ++++++++++++++++++++-------------------- 1 file changed, 62 insertions(+), 61 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index a296d47e50e..4cb224056d8 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1549,37 +1549,36 @@ def test_randomness(fn, trans, config, p): assert p_value > 0.0001 -@pytest.mark.parametrize('data_type, mode', [ - ('float', 'L'), - ('byte', 'L'), - ('short', 'I;16'), - ('int', 'I'), -]) -def test_1_channel_tensor_to_pil_image(data_type, mode): - to_tensor = transforms.ToTensor() +def _get_1_channel_tensor_various_types(): + img_data_float = torch.Tensor(1, 4, 4).uniform_() + expected_output = img_data_float.mul(255).int().float().div(255).numpy() + yield img_data_float, expected_output, 'L' - if data_type == 'float': - img_data = torch.Tensor(1, 4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255).numpy() - elif data_type == 'byte': - img_data = torch.ByteTensor(1, 4, 4).random_(0, 255) - expected_output = img_data.float().div(255.0).numpy() - elif data_type == 'short': - img_data = torch.ShortTensor(1, 4, 4).random_() - expected_output = img_data.numpy() - elif data_type == 'int': - img_data = torch.IntTensor(1, 4, 4).random_() - expected_output = img_data.numpy() - else: - assert False + img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255) + expected_output = img_data_byte.float().div(255.0).numpy() + yield img_data_byte, expected_output, 'L' - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) + img_data_short = torch.ShortTensor(1, 4, 4).random_() + expected_output = img_data_short.numpy() + yield img_data_short, expected_output, 'I;16' + + img_data_int = torch.IntTensor(1, 4, 4).random_() + expected_output = img_data_int.numpy() + yield img_data_int, expected_output, 'I' -def test_1_channel_tensor_to_pil_image_error(): +@pytest.mark.parametrize('pass_mode', [False, True]) +@pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_1_channel_tensor_various_types()) +def test_1_channel_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_mode): + transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage() + to_tensor = transforms.ToTensor() + + img = transform(img_data) + assert img.mode == expected_mode + torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) + + +def test_1_channel_float_tensor_to_pil_image(): img_data = torch.Tensor(1, 4, 4).uniform_() # 'F' mode for torch.FloatTensor img_F_mode = transforms.ToPILImage(mode='F')(img_data) @@ -1643,7 +1642,7 @@ def test_2_channel_tensor_to_pil_image(mode): split = img.split() for i in range(2): - assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy()) def test_2_channel_tensor_to_pil_image_error(): @@ -1656,46 +1655,48 @@ def 
test_2_channel_tensor_to_pil_image_error(): transforms.ToPILImage(mode='RGB')(img_data) -@pytest.mark.parametrize('data_type, mode', [ - ('float', 'L'), - ('byte', 'L'), - ('short', 'I;16'), - ('int', 'I'), -]) -def test_2d_tensor_to_pil_image(data_type, mode): +def _get_2d_tensor_various_types(): + img_data_float = torch.Tensor(4, 4).uniform_() + expected_output = img_data_float.mul(255).int().float().div(255).numpy() + yield img_data_float, expected_output, 'L' + + img_data_byte = torch.ByteTensor(4, 4).random_(0, 255) + expected_output = img_data_byte.float().div(255.0).numpy() + yield img_data_byte, expected_output, 'L' + + img_data_short = torch.ShortTensor(4, 4).random_() + expected_output = img_data_short.numpy() + yield img_data_short, expected_output, 'I;16' + + img_data_int = torch.IntTensor(4, 4).random_() + expected_output = img_data_int.numpy() + yield img_data_int, expected_output, 'I' + + + +@pytest.mark.parametrize('pass_mode', [False, True]) +@pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_2d_tensor_various_types()) +def test_2d_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_mode): + transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage() to_tensor = transforms.ToTensor() - if data_type == 'float': - img_data = torch.Tensor(4, 4).uniform_() - expected_output = img_data.mul(255).int().float().div(255).numpy() - elif data_type == 'byte': - img_data = torch.ByteTensor(4, 4).random_(0, 255) - expected_output = img_data.float().div(255.0).numpy() - elif data_type == 'short': - img_data = torch.ShortTensor(4, 4).random_() - expected_output = img_data.numpy() - elif data_type == 'int': - img_data = torch.IntTensor(4, 4).random_() - expected_output = img_data.numpy() - else: - assert False - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) + img = transform(img_data) + assert img.mode == expected_mode + torch.testing.assert_close(expected_output, to_tensor(img).numpy()[0]) -@pytest.mark.parametrize('img_data, mode', [ +@pytest.mark.parametrize('pass_mode', [False, True]) +@pytest.mark.parametrize('img_data, expected_mode', [ (torch.Tensor(4, 4).uniform_().numpy(), 'F'), (torch.ByteTensor(4, 4).random_(0, 255).numpy(), 'L'), (torch.ShortTensor(4, 4).random_().numpy(), 'I;16'), (torch.IntTensor(4, 4).random_().numpy(), 'I'), ]) -def test_2d_ndarray_to_pil_image(img_data, mode): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: - img = transform(img_data) - assert img.mode == mode - np.testing.assert_allclose(img_data, img) +def test_2d_ndarray_to_pil_image(pass_mode, img_data, expected_mode): + transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage() + img = transform(img_data) + assert img.mode == expected_mode + np.testing.assert_allclose(img_data, img) @pytest.mark.parametrize('mode', [None, 'RGB', 'HSV', 'YCbCr']) @@ -1711,7 +1712,7 @@ def test_3_channel_tensor_to_pil_image(mode): assert img.mode == mode split = img.split() for i in range(3): - assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy()) def test_3_channel_tensor_to_pil_image_error(): @@ -1770,7 +1771,7 @@ def test_4_channel_tensor_to_pil_image(mode): split = img.split() for i in range(4): - 
assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()) + torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy()) def test_4_channel_tensor_to_pil_image_error(): From a49ff4814160abb0dfd6166aa834d6c566e20cc7 Mon Sep 17 00:00:00 2001 From: zhiqwang Date: Sat, 5 Jun 2021 09:43:34 -0400 Subject: [PATCH 06/10] Rename to expected_mode in pytest parametrize --- test/test_transforms.py | 69 ++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 35 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 4cb224056d8..33ddc00eac6 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -1588,31 +1588,31 @@ def test_1_channel_float_tensor_to_pil_image(): ) -@pytest.mark.parametrize('img_data, mode', [ +@pytest.mark.parametrize('img_data, expected_mode', [ (torch.Tensor(4, 4, 1).uniform_().numpy(), 'F'), (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), 'L'), (torch.ShortTensor(4, 4, 1).random_().numpy(), 'I;16'), (torch.IntTensor(4, 4, 1).random_().numpy(), 'I'), ]) -def test_1_channel_ndarray_to_pil_image(img_data, mode): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: +def test_1_channel_ndarray_to_pil_image(img_data, expected_mode): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=expected_mode)]: img = transform(img_data) - assert img.mode == mode + assert img.mode == expected_mode # note: we explicitly convert img's dtype because pytorch doesn't support uint16 # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) -@pytest.mark.parametrize('mode', [None, 'LA']) -def test_2_channel_ndarray_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'LA']) +def test_2_channel_ndarray_to_pil_image(expected_mode): img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'LA' # default should assume LA else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(2): torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) @@ -1629,16 +1629,16 @@ def test_2_channel_ndarray_to_pil_image_error(): transforms.ToPILImage(mode='RGB')(img_data) -@pytest.mark.parametrize('mode', [None, 'LA']) -def test_2_channel_tensor_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'LA']) +def test_2_channel_tensor_to_pil_image(expected_mode): img_data = torch.Tensor(2, 4, 4).uniform_() expected_output = img_data.mul(255).int().float().div(255) - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'LA' # default should assume LA else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(2): @@ -1673,7 +1673,6 @@ def _get_2d_tensor_various_types(): yield img_data_int, expected_output, 'I' - @pytest.mark.parametrize('pass_mode', [False, True]) @pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_2d_tensor_various_types()) def test_2d_tensor_to_pil_image(pass_mode, img_data, expected_output, 
expected_mode): @@ -1699,17 +1698,17 @@ def test_2d_ndarray_to_pil_image(pass_mode, img_data, expected_mode): np.testing.assert_allclose(img_data, img) -@pytest.mark.parametrize('mode', [None, 'RGB', 'HSV', 'YCbCr']) -def test_3_channel_tensor_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'RGB', 'HSV', 'YCbCr']) +def test_3_channel_tensor_to_pil_image(expected_mode): img_data = torch.Tensor(3, 4, 4).uniform_() expected_output = img_data.mul(255).int().float().div(255) - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'RGB' # default should assume RGB else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(3): torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy()) @@ -1728,16 +1727,16 @@ def test_3_channel_tensor_to_pil_image_error(): transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) -@pytest.mark.parametrize('mode', [None, 'RGB', 'HSV', 'YCbCr']) -def test_3_channel_ndarray_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'RGB', 'HSV', 'YCbCr']) +def test_3_channel_ndarray_to_pil_image(expected_mode): img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'RGB' # default should assume RGB else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(3): torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) @@ -1757,17 +1756,17 @@ def test_3_channel_ndarray_to_pil_image_error(): transforms.ToPILImage(mode='LA')(img_data) -@pytest.mark.parametrize('mode', [None, 'RGBA', 'CMYK', 'RGBX']) -def test_4_channel_tensor_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'RGBA', 'CMYK', 'RGBX']) +def test_4_channel_tensor_to_pil_image(expected_mode): img_data = torch.Tensor(4, 4, 4).uniform_() expected_output = img_data.mul(255).int().float().div(255) - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'RGBA' # default should assume RGBA else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(4): @@ -1785,16 +1784,16 @@ def test_4_channel_tensor_to_pil_image_error(): transforms.ToPILImage(mode='LA')(img_data) -@pytest.mark.parametrize('mode', [None, 'RGBA', 'CMYK', 'RGBX']) -def test_4_channel_ndarray_to_pil_image(mode): +@pytest.mark.parametrize('expected_mode', [None, 'RGBA', 'CMYK', 'RGBX']) +def test_4_channel_ndarray_to_pil_image(expected_mode): img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() - if mode is None: + if expected_mode is None: img = transforms.ToPILImage()(img_data) assert img.mode == 'RGBA' # default should assume RGBA else: - img = transforms.ToPILImage(mode=mode)(img_data) - assert img.mode == mode + img = transforms.ToPILImage(mode=expected_mode)(img_data) + assert img.mode == expected_mode split = img.split() for i in range(4): torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) From 
acb101c82bb9ec44b29674933cf8ecfb1b77befa Mon Sep 17 00:00:00 2001 From: zhiqwang Date: Sat, 5 Jun 2021 10:19:56 -0400 Subject: [PATCH 07/10] Parametrize over transform with mode --- test/test_transforms.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index f1c252593e3..18d80661030 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -994,19 +994,20 @@ def test_1_channel_float_tensor_to_pil_image(): ) +@pytest.mark.parametrize('pass_mode', [False, True]) @pytest.mark.parametrize('img_data, expected_mode', [ (torch.Tensor(4, 4, 1).uniform_().numpy(), 'F'), (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), 'L'), (torch.ShortTensor(4, 4, 1).random_().numpy(), 'I;16'), (torch.IntTensor(4, 4, 1).random_().numpy(), 'I'), ]) -def test_1_channel_ndarray_to_pil_image(img_data, expected_mode): - for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=expected_mode)]: - img = transform(img_data) - assert img.mode == expected_mode - # note: we explicitly convert img's dtype because pytorch doesn't support uint16 - # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array - torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) +def test_1_channel_ndarray_to_pil_image(pass_mode, img_data, expected_mode): + transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage() + img = transform(img_data) + assert img.mode == expected_mode + # note: we explicitly convert img's dtype because pytorch doesn't support uint16 + # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array + torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) @pytest.mark.parametrize('expected_mode', [None, 'LA']) From c2f569d3d5fd06d78fc12cbc8a3393886b76e55f Mon Sep 17 00:00:00 2001 From: zhiqwang Date: Sat, 5 Jun 2021 10:20:32 -0400 Subject: [PATCH 08/10] Rename pass_mode to with_mode --- test/test_transforms.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 18d80661030..3a5b03be7e0 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -973,10 +973,10 @@ def _get_1_channel_tensor_various_types(): yield img_data_int, expected_output, 'I' -@pytest.mark.parametrize('pass_mode', [False, True]) +@pytest.mark.parametrize('with_mode', [False, True]) @pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_1_channel_tensor_various_types()) -def test_1_channel_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_mode): - transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage() +def test_1_channel_tensor_to_pil_image(with_mode, img_data, expected_output, expected_mode): + transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage() to_tensor = transforms.ToTensor() img = transform(img_data) @@ -994,15 +994,15 @@ def test_1_channel_float_tensor_to_pil_image(): ) -@pytest.mark.parametrize('pass_mode', [False, True]) +@pytest.mark.parametrize('with_mode', [False, True]) @pytest.mark.parametrize('img_data, expected_mode', [ (torch.Tensor(4, 4, 1).uniform_().numpy(), 'F'), (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), 'L'), (torch.ShortTensor(4, 4, 1).random_().numpy(), 'I;16'), (torch.IntTensor(4, 4, 1).random_().numpy(), 'I'), ]) -def 
From c2f569d3d5fd06d78fc12cbc8a3393886b76e55f Mon Sep 17 00:00:00 2001
From: zhiqwang
Date: Sat, 5 Jun 2021 10:20:32 -0400
Subject: [PATCH 08/10] Rename pass_mode to with_mode

---
 test/test_transforms.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 18d80661030..3a5b03be7e0 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -973,10 +973,10 @@ def _get_1_channel_tensor_various_types():
     yield img_data_int, expected_output, 'I'
 
 
-@pytest.mark.parametrize('pass_mode', [False, True])
+@pytest.mark.parametrize('with_mode', [False, True])
 @pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_1_channel_tensor_various_types())
-def test_1_channel_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_mode):
-    transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage()
+def test_1_channel_tensor_to_pil_image(with_mode, img_data, expected_output, expected_mode):
+    transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
     to_tensor = transforms.ToTensor()
 
     img = transform(img_data)
@@ -994,15 +994,15 @@ def test_1_channel_float_tensor_to_pil_image():
     )
 
 
-@pytest.mark.parametrize('pass_mode', [False, True])
+@pytest.mark.parametrize('with_mode', [False, True])
 @pytest.mark.parametrize('img_data, expected_mode', [
     (torch.Tensor(4, 4, 1).uniform_().numpy(), 'F'),
     (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), 'L'),
     (torch.ShortTensor(4, 4, 1).random_().numpy(), 'I;16'),
     (torch.IntTensor(4, 4, 1).random_().numpy(), 'I'),
 ])
-def test_1_channel_ndarray_to_pil_image(pass_mode, img_data, expected_mode):
-    transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage()
+def test_1_channel_ndarray_to_pil_image(with_mode, img_data, expected_mode):
+    transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
     img = transform(img_data)
     assert img.mode == expected_mode
     # note: we explicitly convert img's dtype because pytorch doesn't support uint16
@@ -1080,10 +1080,10 @@ def _get_2d_tensor_various_types():
     yield img_data_int, expected_output, 'I'
 
 
-@pytest.mark.parametrize('pass_mode', [False, True])
+@pytest.mark.parametrize('with_mode', [False, True])
 @pytest.mark.parametrize('img_data, expected_output, expected_mode', _get_2d_tensor_various_types())
-def test_2d_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_mode):
-    transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage()
+def test_2d_tensor_to_pil_image(with_mode, img_data, expected_output, expected_mode):
+    transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
     to_tensor = transforms.ToTensor()
 
     img = transform(img_data)
@@ -1091,15 +1091,15 @@ def test_2d_tensor_to_pil_image(pass_mode, img_data, expected_output, expected_m
     torch.testing.assert_close(expected_output, to_tensor(img).numpy()[0])
 
 
-@pytest.mark.parametrize('pass_mode', [False, True])
+@pytest.mark.parametrize('with_mode', [False, True])
 @pytest.mark.parametrize('img_data, expected_mode', [
     (torch.Tensor(4, 4).uniform_().numpy(), 'F'),
     (torch.ByteTensor(4, 4).random_(0, 255).numpy(), 'L'),
     (torch.ShortTensor(4, 4).random_().numpy(), 'I;16'),
     (torch.IntTensor(4, 4).random_().numpy(), 'I'),
 ])
-def test_2d_ndarray_to_pil_image(pass_mode, img_data, expected_mode):
-    transform = transforms.ToPILImage(mode=expected_mode) if pass_mode else transforms.ToPILImage()
+def test_2d_ndarray_to_pil_image(with_mode, img_data, expected_mode):
+    transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
     img = transform(img_data)
     assert img.mode == expected_mode
     np.testing.assert_allclose(img_data, img)
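Aside on the 2D cases touched by this rename: test_2d_ndarray_to_pil_image compares the ndarray against the PIL image directly, which works because np.testing.assert_allclose coerces the PIL image through NumPy's array interface. A minimal round-trip sketch under the same assumption (a float32 input, which these tests expect to map to PIL mode 'F'):

import numpy as np
from torchvision import transforms

img_data = np.random.rand(4, 4).astype(np.float32)  # 2D input, no channel axis
img = transforms.ToPILImage()(img_data)
assert img.mode == 'F'  # 32-bit floating point pixels
np.testing.assert_allclose(img_data, np.asarray(img))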
From bbb0e35bdb15611cd474d6253fdbf3e8948675c4 Mon Sep 17 00:00:00 2001
From: zhiqwang
Date: Sat, 5 Jun 2021 10:23:22 -0400
Subject: [PATCH 09/10] Make sure that each individual line effectively raises an error

---
 test/test_transforms.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 3a5b03be7e0..82627c473e1 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1029,10 +1029,12 @@ def test_2_channel_ndarray_to_pil_image_error():
     img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
     transforms.ToPILImage().__repr__()
 
+    # should raise if we try a mode for 4 or 1 or 3 channel images
     with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
-        # should raise if we try a mode for 4 or 1 or 3 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         transforms.ToPILImage(mode='RGB')(img_data)
@@ -1055,10 +1057,12 @@ def test_2_channel_tensor_to_pil_image(expected_mode):
 def test_2_channel_tensor_to_pil_image_error():
     img_data = torch.Tensor(2, 4, 4).uniform_()
 
+    # should raise if we try a mode for 4 or 1 or 3 channel images
     with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
-        # should raise if we try a mode for 4 or 1 or 3 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
         transforms.ToPILImage(mode='RGB')(img_data)
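PATCH 09 (and PATCH 10 below) close a real testing gap rather than a style issue: pytest.raises leaves its with-block at the first exception, so in the old code the second and third ToPILImage calls were never executed, let alone checked. A minimal demonstration of the gotcha, independent of torchvision (the helper names are made up for illustration):

import pytest

def boom(msg):
    raise ValueError(msg)

def test_single_block_checks_only_first_call():
    with pytest.raises(ValueError):
        boom('checked')
        boom('never reached')  # dead code: the block exits at the first raise

def test_split_blocks_check_every_call():
    with pytest.raises(ValueError):
        boom('checked')
    with pytest.raises(ValueError):
        boom('also checked')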
From 1e8e86aacb2c3c6d2e818b9ceae6e7590eceaa7c Mon Sep 17 00:00:00 2001
From: zhiqwang
Date: Sat, 5 Jun 2021 10:35:37 -0400
Subject: [PATCH 10/10] Make sure that each individual line effectively raises an error

---
 test/test_transforms.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/test/test_transforms.py b/test/test_transforms.py
index 82627c473e1..bf3a030f285 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -1128,10 +1128,12 @@ def test_3_channel_tensor_to_pil_image(expected_mode):
 def test_3_channel_tensor_to_pil_image_error():
     img_data = torch.Tensor(3, 4, 4).uniform_()
     error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs"
+    # should raise if we try a mode for 4 or 1 or 2 channel images
     with pytest.raises(ValueError, match=error_message_3d):
-        # should raise if we try a mode for 4 or 1 or 2 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
+    with pytest.raises(ValueError, match=error_message_3d):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=error_message_3d):
         transforms.ToPILImage(mode='LA')(img_data)
 
     with pytest.raises(ValueError, match=r'pic should be 2/3 dimensional. Got \d+ dimensions.'):
@@ -1160,10 +1162,12 @@ def test_3_channel_ndarray_to_pil_image_error():
     transforms.ToPILImage().__repr__()
 
     error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs"
+    # should raise if we try a mode for 4 or 1 or 2 channel images
     with pytest.raises(ValueError, match=error_message_3d):
-        # should raise if we try a mode for 4 or 1 or 2 channel images
         transforms.ToPILImage(mode='RGBA')(img_data)
+    with pytest.raises(ValueError, match=error_message_3d):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=error_message_3d):
         transforms.ToPILImage(mode='LA')(img_data)
@@ -1188,10 +1192,12 @@ def test_4_channel_tensor_to_pil_image_error():
     img_data = torch.Tensor(4, 4, 4).uniform_()
 
     error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs"
+    # should raise if we try a mode for 3 or 1 or 2 channel images
     with pytest.raises(ValueError, match=error_message_4d):
-        # should raise if we try a mode for 3 or 1 or 2 channel images
         transforms.ToPILImage(mode='RGB')(img_data)
+    with pytest.raises(ValueError, match=error_message_4d):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=error_message_4d):
         transforms.ToPILImage(mode='LA')(img_data)
@@ -1214,10 +1220,12 @@ def test_4_channel_ndarray_to_pil_image_error():
     img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
 
     error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs"
+    # should raise if we try a mode for 3 or 1 or 2 channel images
     with pytest.raises(ValueError, match=error_message_4d):
-        # should raise if we try a mode for 3 or 1 or 2 channel images
         transforms.ToPILImage(mode='RGB')(img_data)
+    with pytest.raises(ValueError, match=error_message_4d):
         transforms.ToPILImage(mode='P')(img_data)
+    with pytest.raises(ValueError, match=error_message_4d):
         transforms.ToPILImage(mode='LA')(img_data)
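A final note on the match= arguments used throughout these patches: pytest.raises treats match as a regular expression applied with re.search, which is why the bracketed mode lists in the expected messages are escaped as \[ and \]. When hand-escaping gets unwieldy, re.escape can derive the pattern from the literal message; a small sketch (the reject helper is made up for illustration):

import re
import pytest

def reject():
    raise ValueError("Only modes ['RGBA', 'CMYK', 'RGBX'] are supported for 4D inputs")

def test_match_is_a_regex():
    # Brackets are regex metacharacters, so escape the literal message.
    pattern = re.escape("Only modes ['RGBA', 'CMYK', 'RGBX'] are supported for 4D inputs")
    with pytest.raises(ValueError, match=pattern):
        reject()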