Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Removed type op from exception errors #2729

Merged
Merged 2 commits on Sep 29, 2020 (source and target branch names missing from this capture)
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
33 changes: 32 additions & 1 deletion test/test_transforms_tensor.py
@@ -1,3 +1,4 @@
import os
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
Expand All @@ -8,7 +9,7 @@

import unittest

from common_utils import TransformsTester
from common_utils import TransformsTester, get_tmp_dir


class Tester(TransformsTester):
Expand Down Expand Up @@ -73,6 +74,9 @@ def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **matc
batch_tensors = self._create_data_batch(height=23, width=34, channels=3, num_samples=4, device=self.device)
self._test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors)

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_{}.pt".format(method)))

def _test_op(self, func, method, fn_kwargs=None, meth_kwargs=None):
self._test_functional_op(func, fn_kwargs)
self._test_class_op(method, meth_kwargs)
Expand Down Expand Up @@ -188,6 +192,9 @@ def test_center_crop(self):
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_center_crop.pt"))

def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kwargs=None):
if fn_kwargs is None:
fn_kwargs = {}
Expand Down Expand Up @@ -231,6 +238,9 @@ def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kw
self.assertTrue(transformed_img.equal(transformed_batch[i, ...]),
msg="{} vs {}".format(transformed_img, transformed_batch[i, ...]))

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_op_list_{}.pt".format(method)))

def test_five_crop(self):
fn_kwargs = meth_kwargs = {"size": (5,)}
self._test_op_list_output(
Expand Down Expand Up @@ -294,6 +304,9 @@ def test_resize(self):
self._test_transform_vs_scripted(transform, s_transform, tensor)
self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

with get_tmp_dir() as tmp_dir:
script_fn.save(os.path.join(tmp_dir, "t_resize.pt"))

def test_resized_crop(self):
tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
Expand All @@ -309,6 +322,9 @@ def test_resized_crop(self):
self._test_transform_vs_scripted(transform, s_transform, tensor)
self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

with get_tmp_dir() as tmp_dir:
s_transform.save(os.path.join(tmp_dir, "t_resized_crop.pt"))

def test_random_affine(self):
tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
Expand All @@ -327,6 +343,9 @@ def test_random_affine(self):
self._test_transform_vs_scripted(transform, s_transform, tensor)
self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

with get_tmp_dir() as tmp_dir:
s_transform.save(os.path.join(tmp_dir, "t_random_affine.pt"))

def test_random_rotate(self):
tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
Expand All @@ -343,6 +362,9 @@ def test_random_rotate(self):
self._test_transform_vs_scripted(transform, s_transform, tensor)
self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

with get_tmp_dir() as tmp_dir:
s_transform.save(os.path.join(tmp_dir, "t_random_rotate.pt"))

def test_random_perspective(self):
tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)
batch_tensors = torch.randint(0, 255, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device)
Expand All @@ -358,6 +380,9 @@ def test_random_perspective(self):
self._test_transform_vs_scripted(transform, s_transform, tensor)
self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)

with get_tmp_dir() as tmp_dir:
s_transform.save(os.path.join(tmp_dir, "t_perspective.pt"))

def test_to_grayscale(self):

meth_kwargs = {"num_output_channels": 1}
Expand Down Expand Up @@ -388,6 +413,9 @@ def test_normalize(self):
self._test_transform_vs_scripted(fn, scripted_fn, tensor)
self._test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt"))

def test_linear_transformation(self):
c, h, w = 3, 24, 32

Expand All @@ -410,6 +438,9 @@ def test_linear_transformation(self):
s_transformed_batch = scripted_fn(batch_tensors)
self.assertTrue(transformed_batch.equal(s_transformed_batch))

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt"))

def test_compose(self):
tensor, _ = self._create_data(26, 34, device=self.device)
tensor = tensor.to(dtype=torch.float32) / 255.0
Expand Down
14 changes: 7 additions & 7 deletions torchvision/transforms/functional_tensor.py
Expand Up @@ -15,7 +15,7 @@ def _get_image_size(img: Tensor) -> List[int]:
"""Returns (w, h) of tensor image"""
if _is_tensor_a_torch_image(img):
return [img.shape[-1], img.shape[-2]]
raise TypeError("Unexpected type {}".format(type(img)))
raise TypeError("Unexpected input type")


def _get_image_num_channels(img: Tensor) -> int:
Expand All @@ -24,7 +24,7 @@ def _get_image_num_channels(img: Tensor) -> int:
elif img.ndim > 2:
return img.shape[-3]

raise TypeError("Unexpected type {}".format(type(img)))
raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim))


def vflip(img: Tensor) -> Tensor:
Expand Down Expand Up @@ -223,7 +223,7 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError('img should be Tensor image. Got {}'.format(type(img)))
raise TypeError('Input img should be Tensor image')

orig_dtype = img.dtype
if img.dtype == torch.uint8:
Expand Down Expand Up @@ -294,7 +294,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
"""

if not isinstance(img, torch.Tensor):
raise TypeError('img should be a Tensor. Got {}'.format(type(img)))
raise TypeError('Input img should be a Tensor.')

if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
Expand Down Expand Up @@ -763,10 +763,10 @@ def _assert_grid_transform_inputs(
coeffs: Optional[List[float]] = None,
):
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError("img should be Tensor Image. Got {}".format(type(img)))
raise TypeError("Input img should be Tensor Image")

if matrix is not None and not isinstance(matrix, list):
raise TypeError("Argument matrix should be a list. Got {}".format(type(matrix)))
raise TypeError("Argument matrix should be a list")

if matrix is not None and len(matrix) != 6:
raise ValueError("Argument matrix should have 6 float values")
Expand Down Expand Up @@ -989,7 +989,7 @@ def perspective(
Tensor: transformed image.
"""
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))
raise TypeError('Input img should be Tensor Image')

_interpolation_modes = {
0: "nearest",
Expand Down