diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index f05112ee498..e158ff4f805 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -1030,25 +1030,9 @@ def test_resized_crop(device, mode):
         (F_t.adjust_contrast, (1.0,)),
         (F_t.adjust_hue, (-0.5,)),
         (F_t.adjust_saturation, (2.0,)),
-        (
-            F_t.pad,
-            (
-                [
-                    2,
-                ],
-                2,
-                "constant",
-            ),
-        ),
+        (F_t.pad, ([2], 2, "constant")),
         (F_t.resize, ([10, 11],)),
-        (
-            F_t.perspective,
-            (
-                [
-                    0.2,
-                ]
-            ),
-        ),
+        (F_t.perspective, ([0.2])),
         (F_t.gaussian_blur, ((2, 2), (0.7, 0.5))),
         (F_t.invert, ()),
         (F_t.posterize, (0,)),
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index da7acef3e7b..cf75034ee6c 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -570,12 +570,7 @@ def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtyp


 def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[List[float]]) -> Tensor:
-    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(
-        img,
-        [
-            grid.dtype,
-        ],
-    )
+    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype])

     if img.shape[0] > 1:
         # Apply same grid to a batch of images