diff --git a/test/_assert_utils.py b/test/_assert_utils.py
deleted file mode 100644
index e766e2df4b8..00000000000
--- a/test/_assert_utils.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""This is a temporary module and should be removed as soon as torch.testing.assert_equal is supported."""
-# TODO: remove this as soon torch.testing.assert_equal is supported
-
-import functools
-
-import torch.testing
-
-__all__ = ["assert_equal"]
-
-
-assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
diff --git a/test/common_utils.py b/test/common_utils.py
index 3f8ad8a7f55..1da5226f425 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -9,6 +9,7 @@
 import __main__
 import random
 import inspect
+import functools
 
 from numbers import Number
 from torch._six import string_classes
@@ -17,8 +18,6 @@
 import numpy as np
 from PIL import Image
 
-from _assert_utils import assert_equal
-
 IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9
 PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367"
 PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG)
@@ -268,6 +267,9 @@ def _create_data_batch(height=3, width=3, channels=3, num_samples=4, device="cpu
     return batch_tensor
 
 
+assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
+
+
 def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):
     np_pil_image = np.array(pil_image)
     if np_pil_image.ndim == 2:
@@ -275,7 +277,7 @@ def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):
     pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1)))
     if msg is None:
         msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor)
-    assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg)
+    assert_equal(tensor.cpu(), pil_tensor, msg=msg)
 
 
 def _assert_approx_equal_tensor_to_pil(tensor, pil_image, tol=1e-5, msg=None, agg_method="mean",
diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py
index be2fab8e0dd..7754c1a98e8 100644
--- a/test/test_datasets_samplers.py
+++ b/test/test_datasets_samplers.py
@@ -13,8 +13,7 @@
 from torchvision.datasets.video_utils import VideoClips, unfold
 from torchvision import get_video_backend
 
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
 
 
 @contextlib.contextmanager
diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py
index 389ad5196af..00db0aad127 100644
--- a/test/test_datasets_video_utils.py
+++ b/test/test_datasets_video_utils.py
@@ -6,8 +6,7 @@
 from torchvision import io
 from torchvision.datasets.video_utils import VideoClips, unfold
 
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
 
 
 @contextlib.contextmanager
@@ -41,7 +40,7 @@ def test_unfold(self):
             [0, 1, 2],
             [3, 4, 5],
         ])
-        assert_equal(r, expected, check_stride=False)
+        assert_equal(r, expected)
 
         r = unfold(a, 3, 2, 1)
         expected = torch.tensor([
@@ -49,14 +48,14 @@
             [2, 3, 4],
             [4, 5, 6]
         ])
-        assert_equal(r, expected, check_stride=False)
+        assert_equal(r, expected)
 
         r = unfold(a, 3, 2, 2)
         expected = torch.tensor([
             [0, 2, 4],
             [2, 4, 6],
         ])
-        assert_equal(r, expected, check_stride=False)
+        assert_equal(r, expected)
 
     @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
     def test_video_clips(self):
diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index a02636ccdf9..5ce82304569 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -21,8 +21,8 @@
     _assert_equal_tensor_to_pil,
     _assert_approx_equal_tensor_to_pil,
     _test_fn_on_batch,
+    assert_equal,
 )
-from _assert_utils import assert_equal
 
 from typing import Dict, List, Sequence, Tuple
 
@@ -187,11 +183,7 @@ def test_square_rotations(self, device, height, width, dt, angle, config, fn):
             tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
         )
         if config is not None:
-            assert_equal(
-                torch.rot90(tensor, **config),
-                out_tensor,
-                check_stride=False,
-            )
+            assert_equal(torch.rot90(tensor, **config), out_tensor)
 
         if out_tensor.dtype != torch.uint8:
             out_tensor = out_tensor.to(torch.uint8)
@@ -856,7 +852,6 @@ def test_resized_crop(device, mode):
     assert_equal(
         expected_out_tensor,
         out_tensor,
-        check_stride=False,
         msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]),
     )
 
@@ -1001,10 +996,7 @@
     ).reshape(shape[-2], shape[-1], shape[-3]).permute(2, 0, 1).to(tensor)
 
     out = fn(tensor, kernel_size=ksize, sigma=sigma)
-    torch.testing.assert_close(
-        out, true_out, rtol=0.0, atol=1.0, check_stride=False,
-        msg="{}, {}".format(ksize, sigma)
-    )
+    torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma))
 
 
 @pytest.mark.parametrize('device', cpu_and_gpu())
diff --git a/test/test_image.py b/test/test_image.py
index 0ed778ab1ea..e7e5b8b197d 100644
--- a/test/test_image.py
+++ b/test/test_image.py
@@ -9,8 +9,7 @@
 import torch
 from PIL import Image, __version__ as PILLOW_VERSION
 import torchvision.transforms.functional as F
-from common_utils import get_tmp_dir, needs_cuda
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, needs_cuda, assert_equal
 
 from torchvision.io.image import (
     decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file,
@@ -280,7 +279,7 @@ def test_read_1_bit_png(shape):
         img.save(image_path)
         img1 = read_image(image_path)
         img2 = normalize_dimensions(torch.as_tensor(pixels * 255, dtype=torch.uint8))
-        assert_equal(img1, img2, check_stride=False)
+        assert_equal(img1, img2)
 
 
 @pytest.mark.parametrize('shape', [
diff --git a/test/test_io.py b/test/test_io.py
index 93164a9997f..56cd0af5fd8 100644
--- a/test/test_io.py
+++ b/test/test_io.py
@@ -9,8 +9,7 @@
 import warnings
 from urllib.error import URLError
 
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
 
 
 try:
diff --git a/test/test_models_detection_anchor_utils.py b/test/test_models_detection_anchor_utils.py
index c918d3fc8df..4477e9e1566 100644
--- a/test/test_models_detection_anchor_utils.py
+++ b/test/test_models_detection_anchor_utils.py
@@ -1,6 +1,5 @@
 import torch
-from common_utils import TestCase
-from _assert_utils import assert_equal
+from common_utils import TestCase, assert_equal
 from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
 from torchvision.models.detection.image_list import ImageList
 import pytest
diff --git a/test/test_models_detection_negative_samples.py b/test/test_models_detection_negative_samples.py
index 51749011dd4..a4b7064b338 100644
--- a/test/test_models_detection_negative_samples.py
+++ b/test/test_models_detection_negative_samples.py
@@ -7,7 +7,7 @@
 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
 
 import pytest
-from _assert_utils import assert_equal
+from common_utils import assert_equal
 
 
 class TestModelsDetectionNegativeSamples:
diff --git a/test/test_models_detection_utils.py b/test/test_models_detection_utils.py
index bb50e237544..b599bbeaea1 100644
--- a/test/test_models_detection_utils.py
+++ b/test/test_models_detection_utils.py
@@ -4,7 +4,7 @@
 from torchvision.models.detection.transform import GeneralizedRCNNTransform
 import pytest
 from torchvision.models.detection import backbone_utils
-from _assert_utils import assert_equal
+from common_utils import assert_equal
 
 
 class TestModelsDetectionUtils:
diff --git a/test/test_onnx.py b/test/test_onnx.py
index c9455fbd86a..c093ccb4863 100644
--- a/test/test_onnx.py
+++ b/test/test_onnx.py
@@ -6,8 +6,7 @@
 except ImportError:
     onnxruntime = None
 
-from common_utils import set_rng_seed
-from _assert_utils import assert_equal
+from common_utils import set_rng_seed, assert_equal
 import io
 import torch
 from torchvision import ops
diff --git a/test/test_ops.py b/test/test_ops.py
index a776268a5b4..5c2fc882902 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1,5 +1,4 @@
-from common_utils import needs_cuda, cpu_and_gpu
-from _assert_utils import assert_equal
+from common_utils import needs_cuda, cpu_and_gpu, assert_equal
 import math
 from abc import ABC, abstractmethod
 import pytest
diff --git a/test/test_transforms.py b/test/test_transforms.py
index e86b6959517..74757bcb4e6 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -19,8 +19,7 @@
 except ImportError:
     stats = None
 
-from common_utils import cycle_over, int_dtypes, float_dtypes
-from _assert_utils import assert_equal
+from common_utils import cycle_over, int_dtypes, float_dtypes, assert_equal
 
 
 GRACE_HOPPER = get_file_path_2(
@@ -159,7 +158,7 @@ def test_accimage_pil_to_tensor(self):
         output = trans(accimage.Image(GRACE_HOPPER))
 
         assert expected_output.size() == output.size()
-        torch.testing.assert_close(output, expected_output, check_stride=False)
+        torch.testing.assert_close(output, expected_output)
 
     def test_accimage_resize(self):
         trans = transforms.Compose([
@@ -205,23 +204,23 @@ def test_to_tensor(self, channels):
         input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
         img = transforms.ToPILImage()(input_data)
         output = trans(img)
-        torch.testing.assert_close(output, input_data, check_stride=False)
+        torch.testing.assert_close(output, input_data)
 
         ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
         output = trans(ndarray)
         expected_output = ndarray.transpose((2, 0, 1)) / 255.0
-        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
+        torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
 
         ndarray = np.random.rand(height, width, channels).astype(np.float32)
         output = trans(ndarray)
         expected_output = ndarray.transpose((2, 0, 1))
-        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
+        torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
 
         # separate test for mode '1' PIL images
         input_data = torch.ByteTensor(1, height, width).bernoulli_()
         img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
         output = trans(img)
-        torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False)
+        torch.testing.assert_close(input_data, output, check_dtype=False)
 
     def test_to_tensor_errors(self):
         height, width = 4, 4
@@ -258,7 +257,7 @@ def test_pil_to_tensor(self, channels):
         input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
         img = transforms.ToPILImage()(input_data)
         output = trans(img)
-        torch.testing.assert_close(input_data, output, check_stride=False)
+        torch.testing.assert_close(input_data, output)
 
         input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
         img = transforms.ToPILImage()(input_data)
@@ -270,13 +269,13 @@
         img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
         output = trans(img)  # HWC -> CHW
         expected_output = (input_data * 255).byte()
-        torch.testing.assert_close(output, expected_output, check_stride=False)
+        torch.testing.assert_close(output, expected_output)
 
         # separate test for mode '1' PIL images
         input_data = torch.ByteTensor(1, height, width).bernoulli_()
         img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
         output = trans(img).view(torch.uint8).bool().to(torch.uint8)
-        torch.testing.assert_close(input_data, output, check_stride=False)
+        torch.testing.assert_close(input_data, output)
 
     def test_pil_to_tensor_errors(self):
         height, width = 4, 4
@@ -420,10 +419,10 @@ def test_pad(self):
         h_padded = result[:, :padding, :]
         w_padded = result[:, :, :padding]
         torch.testing.assert_close(
-            h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
+            h_padded, torch.full_like(h_padded, fill_value=fill_v), rtol=0.0, atol=eps
         )
         torch.testing.assert_close(
-            w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
+            w_padded, torch.full_like(w_padded, fill_value=fill_v), rtol=0.0, atol=eps
         )
 
         pytest.raises(ValueError, transforms.Pad(padding, fill=(1, 2)), transforms.ToPILImage()(img))
@@ -457,7 +456,7 @@ def test_pad_with_non_constant_padding_modes(self):
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
         edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
-        assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
+        assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8))
         assert transforms.ToTensor()(edge_padded_img).size() == (3, 35, 35)
 
         # Pad 3 to left/right, 2 to top/bottom
@@ -465,7 +464,7 @@
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
         reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
-        assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8), check_stride=False)
+        assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8))
         assert transforms.ToTensor()(reflect_padded_img).size() == (3, 33, 35)
 
         # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
@@ -473,7 +472,7 @@
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
         symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
-        assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
+        assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8))
         assert transforms.ToTensor()(symmetric_padded_img).size() == (3, 32, 34)
 
         # Check negative padding explicitly for symmetric case, since it is not
@@ -482,8 +481,8 @@
         symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric')
         symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
         symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
-        assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8), check_stride=False)
-        assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8), check_stride=False)
+        assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8))
+        assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8))
         assert transforms.ToTensor()(symmetric_padded_img_neg).size() == (3, 28, 31)
 
     def test_pad_raises_with_invalid_pad_sequence_len(self):
@@ -502,7 +501,7 @@ def test_pad_with_mode_F_images(self):
         img = Image.new("F", (10, 10))
 
         padded_img = transform(img)
-        assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size], check_stride=False)
+        assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size])
 
 
 @pytest.mark.skipif(stats is None, reason="scipy.stats not available")
@@ -579,7 +578,7 @@ def test_1_channel_tensor_to_pil_image(self, with_mode, img_data, expected_outpu
         img = transform(img_data)
 
         assert img.mode == expected_mode
-        torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False)
+        torch.testing.assert_close(expected_output, to_tensor(img).numpy())
 
     def test_1_channel_float_tensor_to_pil_image(self):
         img_data = torch.Tensor(1, 4, 4).uniform_()
@@ -617,7 +616,7 @@ def test_2_channel_ndarray_to_pil_image(self, expected_mode):
         assert img.mode == expected_mode
         split = img.split()
         for i in range(2):
-            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
     def test_2_channel_ndarray_to_pil_image_error(self):
         img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
@@ -721,7 +720,7 @@ def test_3_channel_ndarray_to_pil_image(self, expected_mode):
         assert img.mode == expected_mode
         split = img.split()
         for i in range(3):
-            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
     def test_3_channel_ndarray_to_pil_image_error(self):
         img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
@@ -778,7 +777,7 @@ def test_4_channel_ndarray_to_pil_image(self, expected_mode):
         assert img.mode == expected_mode
         split = img.split()
         for i in range(4):
-            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
     def test_4_channel_ndarray_to_pil_image_error(self):
         img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
@@ -1152,7 +1151,7 @@ def test_to_grayscale():
     assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
     assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
    assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-    assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
+    assert_equal(gray_np, gray_np_2[:, :, 0])
 
     # Case 3: 1 channel grayscale -> 1 channel grayscale
     trans3 = transforms.Grayscale(num_output_channels=1)
@@ -1170,7 +1169,7 @@
     assert gray_np_4.shape == tuple(x_shape), 'should be 3 channel'
     assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
     assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
-    assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False)
+    assert_equal(gray_np, gray_np_4[:, :, 0])
 
     # Checking if Grayscale can be printed as string
     trans4.__repr__()
@@ -1240,7 +1239,7 @@ def test_random_grayscale():
     assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
     assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
     assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-    assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
+    assert_equal(gray_np, gray_np_2[:, :, 0])
 
     # Case 3b: RGB -> 3 channel grayscale (unchanged)
     trans2 = transforms.RandomGrayscale(p=0.0)
@@ -1600,8 +1599,9 @@ def test_center_crop_2(odd_image_size, delta, delta_width, delta_height):
 
     # Ensure output for PIL and Tensor are equal
     assert_equal(
-        output_tensor, output_pil, check_stride=False,
-        msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
+        output_tensor,
+        output_pil,
+        msg="image_size: {} crop_size: {}".format(input_image_size, crop_size),
     )
 
     # Check if content in center of both image and cropped output is same.
@@ -1625,7 +1625,7 @@
         input_center_tl[1]:input_center_tl[1] + center_size[1]
     ]
 
-    assert_equal(output_center, img_center, check_stride=False)
+    assert_equal(output_center, img_center)
 
 
 def test_color_jitter():
diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py
index 84e9bd7408c..0bf5d77716f 100644
--- a/test/test_transforms_tensor.py
+++ b/test/test_transforms_tensor.py
@@ -18,8 +18,8 @@
     _assert_equal_tensor_to_pil,
     _assert_approx_equal_tensor_to_pil,
     cpu_and_gpu,
+    assert_equal,
 )
-from _assert_utils import assert_equal
 
 
 NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC
diff --git a/test/test_transforms_video.py b/test/test_transforms_video.py
index 81b65ef0a6d..1b6b85a29ba 100644
--- a/test/test_transforms_video.py
+++ b/test/test_transforms_video.py
@@ -4,7 +4,7 @@
 import random
 import numpy as np
 import warnings
-from _assert_utils import assert_equal
+from common_utils import assert_equal
 
 try:
     from scipy import stats
diff --git a/test/test_utils.py b/test/test_utils.py
index 4c3d71e3776..37829b906f1 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -9,7 +9,7 @@
 from io import BytesIO
 import torchvision.transforms.functional as F
 from PIL import Image, __version__ as PILLOW_VERSION, ImageColor
-from _assert_utils import assert_equal
+from common_utils import assert_equal
 
 PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split('.'))
 
diff --git a/test/test_video_reader.py b/test/test_video_reader.py
index 0a739673298..10a6c242a1e 100644
--- a/test/test_video_reader.py
+++ b/test/test_video_reader.py
@@ -10,8 +10,7 @@
 from numpy.random import randint
 from torchvision import set_video_backend
 from torchvision.io import _HAS_VIDEO_OPT
-from common_utils import PY39_SKIP
-from _assert_utils import assert_equal
+from common_utils import PY39_SKIP, assert_equal
 
 try:
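
A minimal standalone sketch of the helper this patch consolidates into test/common_utils.py (not part of the patch itself; it assumes a torch build where torch.testing.assert_close is available and ignores stride mismatches by default, which is what makes the explicit check_stride=False arguments above redundant):

    import functools

    import torch
    import torch.testing

    # With rtol=0 and atol=0, assert_close tolerates no numeric difference,
    # so the "closeness" check becomes an exact equality check.
    assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)

    a = torch.arange(6).reshape(2, 3)
    b = a.t().contiguous().t()  # same values and dtype, different strides

    assert_equal(a, b)  # passes: values match exactly; strides are not compared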