torchvision/transforms/functional.py (71 additions, 0 deletions)
@@ -14,6 +14,7 @@
except ImportError:
accimage = None

from ..utils import _log_api_usage_once
from . import functional_pil as F_pil
from . import functional_tensor as F_t

@@ -67,6 +68,8 @@ def get_image_size(img: Tensor) -> List[int]:
Returns:
List[int]: The image size.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_size)
if isinstance(img, torch.Tensor):
return F_t.get_image_size(img)

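Every hunk below adds the same two-line guard, so it is worth unpacking once. `_log_api_usage_once` (imported above from `..utils`) forwards to PyTorch's built-in `torch._C._log_api_usage_once` hook, and the `torch.jit.is_scripting()` / `torch.jit.is_tracing()` check keeps the call out of scripted and traced graphs, where the helper is neither scriptable nor wanted. A minimal sketch of the pattern, with a simplified stand-in logger (the real body lives in torchvision/utils.py; this version is illustrative only):

import torch

def _log_api_usage_once_sketch(obj) -> None:
    # Simplified stand-in for torchvision.utils._log_api_usage_once:
    # records a usage event keyed by the qualified name of the API.
    name = getattr(obj, "__name__", type(obj).__name__)
    torch._C._log_api_usage_once(f"{obj.__module__}.{name}")

def my_op(x: torch.Tensor) -> torch.Tensor:
    # TorchScript evaluates torch.jit.is_scripting() at compile time, so the
    # logging branch is dropped entirely from scripted and traced code.
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once_sketch(my_op)
    return x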
@@ -82,6 +85,8 @@ def get_image_num_channels(img: Tensor) -> int:
Returns:
int: The number of channels.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_num_channels)
if isinstance(img, torch.Tensor):
return F_t.get_image_num_channels(img)

@@ -110,6 +115,8 @@ def to_tensor(pic):
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_tensor)
if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

@@ -166,6 +173,8 @@ def pil_to_tensor(pic):
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pil_to_tensor)
if not F_pil._is_pil_image(pic):
raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

@@ -205,6 +214,8 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(convert_image_dtype)
if not isinstance(image, torch.Tensor):
raise TypeError("Input img should be Tensor Image")

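As a quick illustration of the scaling semantics described in the docstring above (a usage sketch, not part of the diff): uint8 values are mapped into [0.0, 1.0] on the way to float, and scaled back on the return trip.

import torch
from torchvision.transforms.functional import convert_image_dtype

img_u8 = torch.tensor([[0, 128, 255]], dtype=torch.uint8)
img_f = convert_image_dtype(img_u8, torch.float32)
# tensor([[0.0000, 0.5020, 1.0000]])
img_back = convert_image_dtype(img_f, torch.uint8)
# tensor([[  0, 128, 255]], dtype=torch.uint8)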
Expand All @@ -225,6 +236,8 @@ def to_pil_image(pic, mode=None):
Returns:
PIL Image: Image converted to PIL Image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_pil_image)
if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

@@ -322,6 +335,8 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
Returns:
Tensor: Normalized Tensor image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(normalize)
if not isinstance(tensor, torch.Tensor):
raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.")

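For reference, `normalize` computes `(tensor - mean) / std` per channel. A typical call with the conventional ImageNet statistics (the values are community defaults, not something this diff introduces):

import torch
from torchvision.transforms.functional import normalize

img = torch.rand(3, 224, 224)
out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])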
@@ -401,6 +416,8 @@ def resize(
Returns:
PIL Image or Tensor: Resized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resize)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
Expand All @@ -422,6 +439,8 @@ def resize(


def scale(*args, **kwargs):
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(scale)
warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.")
return resize(*args, **kwargs)

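Note that `scale` logs its own name before delegating, so usage of the deprecated alias is counted separately from `resize`. A quick sanity check of the delegation (illustrative only):

import warnings
import torch
from torchvision.transforms.functional import resize, scale

img = torch.rand(3, 64, 48)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    out = scale(img, 32)  # emits the deprecation warning, then calls resize
assert torch.equal(out, resize(img, 32))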
@@ -467,6 +486,8 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
Returns:
PIL Image or Tensor: Padded image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pad)
if not isinstance(img, torch.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

Expand All @@ -490,6 +511,8 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
PIL Image or Tensor: Cropped image.
"""

if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(crop)
if not isinstance(img, torch.Tensor):
return F_pil.crop(img, top, left, height, width)

Expand All @@ -510,6 +533,8 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(center_crop)
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
@@ -566,6 +591,8 @@ def resized_crop(
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resized_crop)
img = crop(img, top, left, height, width)
img = resize(img, size, interpolation)
return img
Expand All @@ -583,6 +610,8 @@ def hflip(img: Tensor) -> Tensor:
Returns:
PIL Image or Tensor: Horizontally flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(hflip)
if not isinstance(img, torch.Tensor):
return F_pil.hflip(img)

@@ -648,6 +677,8 @@ def perspective(
Returns:
PIL Image or Tensor: Transformed image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(perspective)

coeffs = _get_perspective_coeffs(startpoints, endpoints)

@@ -681,6 +712,8 @@ def vflip(img: Tensor) -> Tensor:
Returns:
PIL Image or Tensor: Vertically flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(vflip)
if not isinstance(img, torch.Tensor):
return F_pil.vflip(img)

Expand All @@ -706,6 +739,8 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(five_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
@@ -753,6 +788,8 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
Corresponding top left, top right, bottom left, bottom right and
center crop and same for the flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ten_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
@@ -786,6 +823,8 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
Returns:
PIL Image or Tensor: Brightness adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_brightness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_brightness(img, brightness_factor)

Expand All @@ -806,6 +845,8 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
Returns:
PIL Image or Tensor: Contrast adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_contrast)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_contrast(img, contrast_factor)

Expand All @@ -826,6 +867,8 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
Returns:
PIL Image or Tensor: Saturation adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_saturation)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_saturation(img, saturation_factor)

@@ -860,6 +903,8 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
Returns:
PIL Image or Tensor: Hue adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_hue)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_hue(img, hue_factor)

@@ -891,6 +936,8 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
Returns:
PIL Image or Tensor: Gamma correction adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_gamma)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_gamma(img, gamma, gain)

@@ -987,6 +1034,8 @@ def rotate(
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rotate)
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
@@ -1067,6 +1116,8 @@ def affine(
Returns:
PIL Image or Tensor: Transformed image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(affine)
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
@@ -1151,6 +1202,8 @@ def to_grayscale(img, num_output_channels=1):
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_grayscale)
if isinstance(img, Image.Image):
return F_pil.to_grayscale(img, num_output_channels)

Expand All @@ -1176,6 +1229,8 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rgb_to_grayscale)
if not isinstance(img, torch.Tensor):
return F_pil.to_grayscale(img, num_output_channels)
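On the tensor path, the conversion uses the ITU-R BT.601 luma weights; it is roughly equivalent to the following sketch (illustrative, not the exact torchvision kernel):

import torch

def rgb_to_gray_sketch(img: torch.Tensor) -> torch.Tensor:
    # img has shape (..., 3, H, W); weight the channels and keep a channel dim.
    r, g, b = img.unbind(dim=-3)
    return (0.2989 * r + 0.587 * g + 0.114 * b).unsqueeze(dim=-3)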

Expand All @@ -1198,6 +1253,8 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool
Returns:
Tensor Image: Erased image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(erase)
if not isinstance(img, torch.Tensor):
raise TypeError(f"img should be Tensor Image. Got {type(img)}")

@@ -1234,6 +1291,8 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
Returns:
PIL Image or Tensor: Gaussian Blurred version of the image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(gaussian_blur)
if not isinstance(kernel_size, (int, list, tuple)):
raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
if isinstance(kernel_size, int):
@@ -1285,6 +1344,8 @@ def invert(img: Tensor) -> Tensor:
Returns:
PIL Image or Tensor: Color inverted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(invert)
if not isinstance(img, torch.Tensor):
return F_pil.invert(img)

Expand All @@ -1304,6 +1365,8 @@ def posterize(img: Tensor, bits: int) -> Tensor:
Returns:
PIL Image or Tensor: Posterized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(posterize)
if not (0 <= bits <= 8):
raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}")

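`posterize` keeps only the `bits` most significant bits of each uint8 channel, which is why the value is validated against the 0-8 range above. For example (usage sketch):

import torch
from torchvision.transforms.functional import posterize

img = torch.arange(256, dtype=torch.uint8).reshape(1, 16, 16)
out = posterize(img, bits=3)  # every value is masked down to a multiple of 32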
Expand All @@ -1325,6 +1388,8 @@ def solarize(img: Tensor, threshold: float) -> Tensor:
Returns:
PIL Image or Tensor: Solarized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(solarize)
if not isinstance(img, torch.Tensor):
return F_pil.solarize(img, threshold)

Expand All @@ -1345,6 +1410,8 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
Returns:
PIL Image or Tensor: Sharpness adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_sharpness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_sharpness(img, sharpness_factor)

Expand All @@ -1365,6 +1432,8 @@ def autocontrast(img: Tensor) -> Tensor:
Returns:
PIL Image or Tensor: An image that was autocontrasted.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(autocontrast)
if not isinstance(img, torch.Tensor):
return F_pil.autocontrast(img)

Expand All @@ -1386,6 +1455,8 @@ def equalize(img: Tensor) -> Tensor:
Returns:
PIL Image or Tensor: An image that was equalized.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(equalize)
if not isinstance(img, torch.Tensor):
return F_pil.equalize(img)

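Taken together, after this patch every eager-mode call into the functional API records a usage event, while scripted and traced models are untouched. For instance (illustrative):

import torch
import torchvision.transforms.functional as F

img = torch.rand(3, 32, 32)
out = F.hflip(F.adjust_brightness(img, 1.2))
# Each functional reports "torchvision.transforms.functional.<name>" through
# torch._C._log_api_usage_once, which is intended to fire once per API per
# process; under torch.jit.script or torch.jit.trace the branch is skipped.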