From 4e7ab9dc1df6b5df48717954ff2374358bf757a3 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 29 Nov 2021 22:35:47 +0000 Subject: [PATCH 1/9] add api usage log to functional transforms --- torchvision/transforms/functional.py | 37 ++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index bd5b170626e..7f427736033 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -14,6 +14,7 @@ except ImportError: accimage = None +from ..utils import _log_api_usage_once from . import functional_pil as F_pil from . import functional_tensor as F_t @@ -56,6 +57,7 @@ def _interpolation_modes_from_int(i: int) -> InterpolationMode: } _is_pil_image = F_pil._is_pil_image +_MODULE = "torchvision.transforms.functional" def get_image_size(img: Tensor) -> List[int]: @@ -67,6 +69,7 @@ def get_image_size(img: Tensor) -> List[int]: Returns: List[int]: The image size. """ + _log_api_usage_once(f"{_MODULE}.get_image_size") if isinstance(img, torch.Tensor): return F_t.get_image_size(img) @@ -82,6 +85,7 @@ def get_image_num_channels(img: Tensor) -> int: Returns: int: The number of channels. """ + _log_api_usage_once(f"{_MODULE}.get_image_num_channels") if isinstance(img, torch.Tensor): return F_t.get_image_num_channels(img) @@ -110,6 +114,7 @@ def to_tensor(pic): Returns: Tensor: Converted image. """ + _log_api_usage_once(f"{_MODULE}.to_tensor") if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") @@ -166,6 +171,7 @@ def pil_to_tensor(pic): Returns: Tensor: Converted image. """ + _log_api_usage_once(f"{_MODULE}.pil_to_tensor") if not F_pil._is_pil_image(pic): raise TypeError(f"pic should be PIL Image. Got {type(pic)}") @@ -205,6 +211,7 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) - overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range of the integer ``dtype``. """ + _log_api_usage_once(f"{_MODULE}.convert_image_dtype") if not isinstance(image, torch.Tensor): raise TypeError("Input img should be Tensor Image") @@ -225,6 +232,7 @@ def to_pil_image(pic, mode=None): Returns: PIL Image: Image converted to PIL Image. """ + _log_api_usage_once(f"{_MODULE}.to_pil_image") if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.") @@ -322,6 +330,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool Returns: Tensor: Normalized Tensor image. """ + _log_api_usage_once(f"{_MODULE}.normalize") if not isinstance(tensor, torch.Tensor): raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.") @@ -401,6 +410,7 @@ def resize( Returns: PIL Image or Tensor: Resized image. """ + _log_api_usage_once(f"{_MODULE}.resize") # Backward compatibility with integer value if isinstance(interpolation, int): warnings.warn( @@ -422,6 +432,7 @@ def resize( def scale(*args, **kwargs): + _log_api_usage_once(f"{_MODULE}.scale") warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") return resize(*args, **kwargs) @@ -467,6 +478,7 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con Returns: PIL Image or Tensor: Padded image. 
""" + _log_api_usage_once(f"{_MODULE}.pad") if not isinstance(img, torch.Tensor): return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) @@ -490,6 +502,7 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: PIL Image or Tensor: Cropped image. """ + _log_api_usage_once(f"{_MODULE}.crop") if not isinstance(img, torch.Tensor): return F_pil.crop(img, top, left, height, width) @@ -510,6 +523,7 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor: Returns: PIL Image or Tensor: Cropped image. """ + _log_api_usage_once(f"{_MODULE}.center_crop") if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: @@ -566,6 +580,7 @@ def resized_crop( Returns: PIL Image or Tensor: Cropped image. """ + _log_api_usage_once(f"{_MODULE}.resized_crop") img = crop(img, top, left, height, width) img = resize(img, size, interpolation) return img @@ -583,6 +598,7 @@ def hflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Horizontally flipped image. """ + _log_api_usage_once(f"{_MODULE}.hflip") if not isinstance(img, torch.Tensor): return F_pil.hflip(img) @@ -648,6 +664,7 @@ def perspective( Returns: PIL Image or Tensor: transformed Image. """ + _log_api_usage_once(f"{_MODULE}.perspective") coeffs = _get_perspective_coeffs(startpoints, endpoints) @@ -681,6 +698,7 @@ def vflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Vertically flipped image. """ + _log_api_usage_once(f"{_MODULE}.vflip") if not isinstance(img, torch.Tensor): return F_pil.vflip(img) @@ -706,6 +724,7 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. """ + _log_api_usage_once(f"{_MODULE}.five_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -753,6 +772,7 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[ Corresponding top left, top right, bottom left, bottom right and center crop and same for the flipped image. """ + _log_api_usage_once(f"{_MODULE}.ten_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -786,6 +806,7 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Brightness adjusted image. """ + _log_api_usage_once(f"{_MODULE}.adjust_brightness") if not isinstance(img, torch.Tensor): return F_pil.adjust_brightness(img, brightness_factor) @@ -806,6 +827,7 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: Returns: PIL Image or Tensor: Contrast adjusted image. """ + _log_api_usage_once(f"{_MODULE}.adjust_contrast") if not isinstance(img, torch.Tensor): return F_pil.adjust_contrast(img, contrast_factor) @@ -826,6 +848,7 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: Returns: PIL Image or Tensor: Saturation adjusted image. """ + _log_api_usage_once(f"{_MODULE}.adjust_saturation") if not isinstance(img, torch.Tensor): return F_pil.adjust_saturation(img, saturation_factor) @@ -860,6 +883,7 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: Returns: PIL Image or Tensor: Hue adjusted image. 
""" + _log_api_usage_once(f"{_MODULE}.adjust_hue") if not isinstance(img, torch.Tensor): return F_pil.adjust_hue(img, hue_factor) @@ -891,6 +915,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: Returns: PIL Image or Tensor: Gamma correction adjusted image. """ + _log_api_usage_once(f"{_MODULE}.adjust_gamma") if not isinstance(img, torch.Tensor): return F_pil.adjust_gamma(img, gamma, gain) @@ -987,6 +1012,7 @@ def rotate( .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters """ + _log_api_usage_once(f"{_MODULE}.rotate") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1067,6 +1093,7 @@ def affine( Returns: PIL Image or Tensor: Transformed image. """ + _log_api_usage_once(f"{_MODULE}.affine") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1151,6 +1178,7 @@ def to_grayscale(img, num_output_channels=1): - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ + _log_api_usage_once(f"{_MODULE}.to_grayscale") if isinstance(img, Image.Image): return F_pil.to_grayscale(img, num_output_channels) @@ -1176,6 +1204,7 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ + _log_api_usage_once(f"{_MODULE}.rgb_to_grayscale") if not isinstance(img, torch.Tensor): return F_pil.to_grayscale(img, num_output_channels) @@ -1198,6 +1227,7 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool Returns: Tensor Image: Erased image. """ + _log_api_usage_once(f"{_MODULE}.erase") if not isinstance(img, torch.Tensor): raise TypeError(f"img should be Tensor Image. Got {type(img)}") @@ -1234,6 +1264,7 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa Returns: PIL Image or Tensor: Gaussian Blurred version of the image. """ + _log_api_usage_once(f"{_MODULE}.gaussian_blur") if not isinstance(kernel_size, (int, list, tuple)): raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}") if isinstance(kernel_size, int): @@ -1285,6 +1316,7 @@ def invert(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Color inverted image. """ + _log_api_usage_once(f"{_MODULE}.invert") if not isinstance(img, torch.Tensor): return F_pil.invert(img) @@ -1304,6 +1336,7 @@ def posterize(img: Tensor, bits: int) -> Tensor: Returns: PIL Image or Tensor: Posterized image. """ + _log_api_usage_once(f"{_MODULE}.posterize") if not (0 <= bits <= 8): raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") @@ -1325,6 +1358,7 @@ def solarize(img: Tensor, threshold: float) -> Tensor: Returns: PIL Image or Tensor: Solarized image. """ + _log_api_usage_once(f"{_MODULE}.solarize") if not isinstance(img, torch.Tensor): return F_pil.solarize(img, threshold) @@ -1345,6 +1379,7 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Sharpness adjusted image. 
""" + _log_api_usage_once(f"{_MODULE}.adjust_sharpness") if not isinstance(img, torch.Tensor): return F_pil.adjust_sharpness(img, sharpness_factor) @@ -1365,6 +1400,7 @@ def autocontrast(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was autocontrasted. """ + _log_api_usage_once(f"{_MODULE}.autocontrast") if not isinstance(img, torch.Tensor): return F_pil.autocontrast(img) @@ -1386,6 +1422,7 @@ def equalize(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was equalized. """ + _log_api_usage_once(f"{_MODULE}.equalize") if not isinstance(img, torch.Tensor): return F_pil.equalize(img) From 4649e4d20e6aeb792b296622716cb49583cc6a65 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 29 Nov 2021 22:51:40 +0000 Subject: [PATCH 2/9] add log to transforms --- torchvision/transforms/transforms.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index a409ff3cbb8..dab67e1abea 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -13,6 +13,7 @@ except ImportError: accimage = None +from ..utils import _log_api_usage_once from . import functional as F from .functional import InterpolationMode, _interpolation_modes_from_int @@ -87,6 +88,7 @@ class Compose: """ def __init__(self, transforms): + _log_api_usage_once(self) self.transforms = transforms def __call__(self, img): @@ -462,6 +464,7 @@ class Lambda: """ def __init__(self, lambd): + _log_api_usage_once(self) if not callable(lambd): raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}") self.lambd = lambd @@ -481,6 +484,7 @@ class RandomTransforms: """ def __init__(self, transforms): + _log_api_usage_once(self) if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence") self.transforms = transforms @@ -519,6 +523,7 @@ class RandomApply(torch.nn.Module): def __init__(self, transforms, p=0.5): super().__init__() + _log_api_usage_once(self) self.transforms = transforms self.p = p @@ -639,6 +644,7 @@ def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): super().__init__() + _log_api_usage_once(self) self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")) @@ -688,6 +694,7 @@ class RandomHorizontalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -718,6 +725,7 @@ class RandomVerticalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -755,6 +763,7 @@ class RandomPerspective(torch.nn.Module): def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0): super().__init__() + _log_api_usage_once(self) self.p = p # Backward compatibility with integer value @@ -867,6 +876,7 @@ class RandomResizedCrop(torch.nn.Module): def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation=InterpolationMode.BILINEAR): super().__init__() + _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") if not isinstance(scale, Sequence): @@ -1081,6 +1091,7 @@ class LinearTransformation(torch.nn.Module): def __init__(self, transformation_matrix, mean_vector): 
super().__init__() + _log_api_usage_once(self) if transformation_matrix.size(0) != transformation_matrix.size(1): raise ValueError( "transformation_matrix should be square. Got " @@ -1159,6 +1170,7 @@ class ColorJitter(torch.nn.Module): def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): super().__init__() + _log_api_usage_once(self) self.brightness = self._check_input(brightness, "brightness") self.contrast = self._check_input(contrast, "contrast") self.saturation = self._check_input(saturation, "saturation") @@ -1281,6 +1293,7 @@ def __init__( self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None ): super().__init__() + _log_api_usage_once(self) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1401,6 +1414,7 @@ def __init__( resample=None, ): super().__init__() + _log_api_usage_once(self) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1545,6 +1559,7 @@ class Grayscale(torch.nn.Module): def __init__(self, num_output_channels=1): super().__init__() + _log_api_usage_once(self) self.num_output_channels = num_output_channels def forward(self, img): @@ -1579,6 +1594,7 @@ class RandomGrayscale(torch.nn.Module): def __init__(self, p=0.1): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -1628,6 +1644,7 @@ class RandomErasing(torch.nn.Module): def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): super().__init__() + _log_api_usage_once(self) if not isinstance(value, (numbers.Number, str, tuple, list)): raise TypeError("Argument value should be either a number or str or a sequence") if isinstance(value, str) and value != "random": @@ -1751,6 +1768,7 @@ class GaussianBlur(torch.nn.Module): def __init__(self, kernel_size, sigma=(0.1, 2.0)): super().__init__() + _log_api_usage_once(self) self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers") for ks in self.kernel_size: if ks <= 0 or ks % 2 == 0: @@ -1842,6 +1860,7 @@ class RandomInvert(torch.nn.Module): def __init__(self, p=0.5): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -1873,6 +1892,7 @@ class RandomPosterize(torch.nn.Module): def __init__(self, bits, p=0.5): super().__init__() + _log_api_usage_once(self) self.bits = bits self.p = p @@ -1905,6 +1925,7 @@ class RandomSolarize(torch.nn.Module): def __init__(self, threshold, p=0.5): super().__init__() + _log_api_usage_once(self) self.threshold = threshold self.p = p @@ -1937,6 +1958,7 @@ class RandomAdjustSharpness(torch.nn.Module): def __init__(self, sharpness_factor, p=0.5): super().__init__() + _log_api_usage_once(self) self.sharpness_factor = sharpness_factor self.p = p @@ -1968,6 +1990,7 @@ class RandomAutocontrast(torch.nn.Module): def __init__(self, p=0.5): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -1998,6 +2021,7 @@ class RandomEqualize(torch.nn.Module): def __init__(self, p=0.5): super().__init__() + _log_api_usage_once(self) self.p = p def forward(self, img): From 470dadf3a63f514567b3787bfcff8ad102d29cb0 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 29 Nov 2021 23:21:39 +0000 Subject: [PATCH 3/9] fix for scriptablity --- torchvision/transforms/functional.py | 71 ++++++++++++++-------------- 1 file changed, 35 insertions(+), 36 
deletions(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 7f427736033..f24fe070726 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -57,7 +57,6 @@ def _interpolation_modes_from_int(i: int) -> InterpolationMode: } _is_pil_image = F_pil._is_pil_image -_MODULE = "torchvision.transforms.functional" def get_image_size(img: Tensor) -> List[int]: @@ -69,7 +68,7 @@ def get_image_size(img: Tensor) -> List[int]: Returns: List[int]: The image size. """ - _log_api_usage_once(f"{_MODULE}.get_image_size") + _log_api_usage_once("torchvision.transforms.functional.get_image_size") if isinstance(img, torch.Tensor): return F_t.get_image_size(img) @@ -85,7 +84,7 @@ def get_image_num_channels(img: Tensor) -> int: Returns: int: The number of channels. """ - _log_api_usage_once(f"{_MODULE}.get_image_num_channels") + _log_api_usage_once("torchvision.transforms.functional.get_image_num_channels") if isinstance(img, torch.Tensor): return F_t.get_image_num_channels(img) @@ -114,7 +113,7 @@ def to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once(f"{_MODULE}.to_tensor") + _log_api_usage_once("torchvision.transforms.functional.to_tensor") if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") @@ -171,7 +170,7 @@ def pil_to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once(f"{_MODULE}.pil_to_tensor") + _log_api_usage_once("torchvision.transforms.functional.pil_to_tensor") if not F_pil._is_pil_image(pic): raise TypeError(f"pic should be PIL Image. Got {type(pic)}") @@ -211,7 +210,7 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) - overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range of the integer ``dtype``. """ - _log_api_usage_once(f"{_MODULE}.convert_image_dtype") + _log_api_usage_once("torchvision.transforms.functional.convert_image_dtype") if not isinstance(image, torch.Tensor): raise TypeError("Input img should be Tensor Image") @@ -232,7 +231,7 @@ def to_pil_image(pic, mode=None): Returns: PIL Image: Image converted to PIL Image. """ - _log_api_usage_once(f"{_MODULE}.to_pil_image") + _log_api_usage_once("torchvision.transforms.functional.to_pil_image") if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.") @@ -330,7 +329,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool Returns: Tensor: Normalized Tensor image. """ - _log_api_usage_once(f"{_MODULE}.normalize") + _log_api_usage_once("torchvision.transforms.functional.normalize") if not isinstance(tensor, torch.Tensor): raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.") @@ -410,7 +409,7 @@ def resize( Returns: PIL Image or Tensor: Resized image. 
""" - _log_api_usage_once(f"{_MODULE}.resize") + _log_api_usage_once("torchvision.transforms.functional.resize") # Backward compatibility with integer value if isinstance(interpolation, int): warnings.warn( @@ -432,7 +431,7 @@ def resize( def scale(*args, **kwargs): - _log_api_usage_once(f"{_MODULE}.scale") + _log_api_usage_once("torchvision.transforms.functional.scale") warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") return resize(*args, **kwargs) @@ -478,7 +477,7 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con Returns: PIL Image or Tensor: Padded image. """ - _log_api_usage_once(f"{_MODULE}.pad") + _log_api_usage_once("torchvision.transforms.functional.pad") if not isinstance(img, torch.Tensor): return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) @@ -502,7 +501,7 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once(f"{_MODULE}.crop") + _log_api_usage_once("torchvision.transforms.functional.crop") if not isinstance(img, torch.Tensor): return F_pil.crop(img, top, left, height, width) @@ -523,7 +522,7 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor: Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once(f"{_MODULE}.center_crop") + _log_api_usage_once("torchvision.transforms.functional.center_crop") if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: @@ -580,7 +579,7 @@ def resized_crop( Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once(f"{_MODULE}.resized_crop") + _log_api_usage_once("torchvision.transforms.functional.resized_crop") img = crop(img, top, left, height, width) img = resize(img, size, interpolation) return img @@ -598,7 +597,7 @@ def hflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Horizontally flipped image. """ - _log_api_usage_once(f"{_MODULE}.hflip") + _log_api_usage_once("torchvision.transforms.functional.hflip") if not isinstance(img, torch.Tensor): return F_pil.hflip(img) @@ -664,7 +663,7 @@ def perspective( Returns: PIL Image or Tensor: transformed Image. """ - _log_api_usage_once(f"{_MODULE}.perspective") + _log_api_usage_once("torchvision.transforms.functional.perspective") coeffs = _get_perspective_coeffs(startpoints, endpoints) @@ -698,7 +697,7 @@ def vflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Vertically flipped image. """ - _log_api_usage_once(f"{_MODULE}.vflip") + _log_api_usage_once("torchvision.transforms.functional.vflip") if not isinstance(img, torch.Tensor): return F_pil.vflip(img) @@ -724,7 +723,7 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. """ - _log_api_usage_once(f"{_MODULE}.five_crop") + _log_api_usage_once("torchvision.transforms.functional.five_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -772,7 +771,7 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[ Corresponding top left, top right, bottom left, bottom right and center crop and same for the flipped image. 
""" - _log_api_usage_once(f"{_MODULE}.ten_crop") + _log_api_usage_once("torchvision.transforms.functional.ten_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -806,7 +805,7 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Brightness adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_brightness") + _log_api_usage_once("torchvision.transforms.functional.adjust_brightness") if not isinstance(img, torch.Tensor): return F_pil.adjust_brightness(img, brightness_factor) @@ -827,7 +826,7 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: Returns: PIL Image or Tensor: Contrast adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_contrast") + _log_api_usage_once("torchvision.transforms.functional.adjust_contrast") if not isinstance(img, torch.Tensor): return F_pil.adjust_contrast(img, contrast_factor) @@ -848,7 +847,7 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: Returns: PIL Image or Tensor: Saturation adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_saturation") + _log_api_usage_once("torchvision.transforms.functional.adjust_saturation") if not isinstance(img, torch.Tensor): return F_pil.adjust_saturation(img, saturation_factor) @@ -883,7 +882,7 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: Returns: PIL Image or Tensor: Hue adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_hue") + _log_api_usage_once("torchvision.transforms.functional.adjust_hue") if not isinstance(img, torch.Tensor): return F_pil.adjust_hue(img, hue_factor) @@ -915,7 +914,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: Returns: PIL Image or Tensor: Gamma correction adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_gamma") + _log_api_usage_once("torchvision.transforms.functional.adjust_gamma") if not isinstance(img, torch.Tensor): return F_pil.adjust_gamma(img, gamma, gain) @@ -1012,7 +1011,7 @@ def rotate( .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters """ - _log_api_usage_once(f"{_MODULE}.rotate") + _log_api_usage_once("torchvision.transforms.functional.rotate") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1093,7 +1092,7 @@ def affine( Returns: PIL Image or Tensor: Transformed image. """ - _log_api_usage_once(f"{_MODULE}.affine") + _log_api_usage_once("torchvision.transforms.functional.affine") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. 
Please, use interpolation instead" @@ -1178,7 +1177,7 @@ def to_grayscale(img, num_output_channels=1): - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once(f"{_MODULE}.to_grayscale") + _log_api_usage_once("torchvision.transforms.functional.to_grayscale") if isinstance(img, Image.Image): return F_pil.to_grayscale(img, num_output_channels) @@ -1204,7 +1203,7 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once(f"{_MODULE}.rgb_to_grayscale") + _log_api_usage_once("torchvision.transforms.functional.rgb_to_grayscale") if not isinstance(img, torch.Tensor): return F_pil.to_grayscale(img, num_output_channels) @@ -1227,7 +1226,7 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool Returns: Tensor Image: Erased image. """ - _log_api_usage_once(f"{_MODULE}.erase") + _log_api_usage_once("torchvision.transforms.functional.erase") if not isinstance(img, torch.Tensor): raise TypeError(f"img should be Tensor Image. Got {type(img)}") @@ -1264,7 +1263,7 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa Returns: PIL Image or Tensor: Gaussian Blurred version of the image. """ - _log_api_usage_once(f"{_MODULE}.gaussian_blur") + _log_api_usage_once("torchvision.transforms.functional.gaussian_blur") if not isinstance(kernel_size, (int, list, tuple)): raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}") if isinstance(kernel_size, int): @@ -1316,7 +1315,7 @@ def invert(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Color inverted image. """ - _log_api_usage_once(f"{_MODULE}.invert") + _log_api_usage_once("torchvision.transforms.functional.invert") if not isinstance(img, torch.Tensor): return F_pil.invert(img) @@ -1336,7 +1335,7 @@ def posterize(img: Tensor, bits: int) -> Tensor: Returns: PIL Image or Tensor: Posterized image. """ - _log_api_usage_once(f"{_MODULE}.posterize") + _log_api_usage_once("torchvision.transforms.functional.posterize") if not (0 <= bits <= 8): raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") @@ -1358,7 +1357,7 @@ def solarize(img: Tensor, threshold: float) -> Tensor: Returns: PIL Image or Tensor: Solarized image. """ - _log_api_usage_once(f"{_MODULE}.solarize") + _log_api_usage_once("torchvision.transforms.functional.solarize") if not isinstance(img, torch.Tensor): return F_pil.solarize(img, threshold) @@ -1379,7 +1378,7 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Sharpness adjusted image. """ - _log_api_usage_once(f"{_MODULE}.adjust_sharpness") + _log_api_usage_once("torchvision.transforms.functional.adjust_sharpness") if not isinstance(img, torch.Tensor): return F_pil.adjust_sharpness(img, sharpness_factor) @@ -1400,7 +1399,7 @@ def autocontrast(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was autocontrasted. """ - _log_api_usage_once(f"{_MODULE}.autocontrast") + _log_api_usage_once("torchvision.transforms.functional.autocontrast") if not isinstance(img, torch.Tensor): return F_pil.autocontrast(img) @@ -1422,7 +1421,7 @@ def equalize(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was equalized. 
""" - _log_api_usage_once(f"{_MODULE}.equalize") + _log_api_usage_once("torchvision.transforms.functional.equalize") if not isinstance(img, torch.Tensor): return F_pil.equalize(img) From 37884a06a437f3254c06adbad08d170bcfbc52b7 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Tue, 30 Nov 2021 00:07:22 +0000 Subject: [PATCH 4/9] skip Compose for scriptability --- torchvision/transforms/transforms.py | 1 - 1 file changed, 1 deletion(-) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index dab67e1abea..37c9a709b25 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -88,7 +88,6 @@ class Compose: """ def __init__(self, transforms): - _log_api_usage_once(self) self.transforms = transforms def __call__(self, img): From e3fd8bb71b927adb9dbc1feb54d8547102e26c96 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Thu, 9 Dec 2021 23:51:08 +0000 Subject: [PATCH 5/9] follow the new policy --- torchvision/transforms/functional.py | 70 ++++++++++++++-------------- torchvision/transforms/transforms.py | 57 +++++++++++++--------- 2 files changed, 70 insertions(+), 57 deletions(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index f24fe070726..99f4ce7e203 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -68,7 +68,7 @@ def get_image_size(img: Tensor) -> List[int]: Returns: List[int]: The image size. """ - _log_api_usage_once("torchvision.transforms.functional.get_image_size") + _log_api_usage_once("transforms", "get_image_size") if isinstance(img, torch.Tensor): return F_t.get_image_size(img) @@ -84,7 +84,7 @@ def get_image_num_channels(img: Tensor) -> int: Returns: int: The number of channels. """ - _log_api_usage_once("torchvision.transforms.functional.get_image_num_channels") + _log_api_usage_once("transforms", "get_image_num_channels") if isinstance(img, torch.Tensor): return F_t.get_image_num_channels(img) @@ -113,7 +113,7 @@ def to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("torchvision.transforms.functional.to_tensor") + _log_api_usage_once("transforms", "to_tensor") if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") @@ -170,7 +170,7 @@ def pil_to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("torchvision.transforms.functional.pil_to_tensor") + _log_api_usage_once("transforms", "pil_to_tensor") if not F_pil._is_pil_image(pic): raise TypeError(f"pic should be PIL Image. Got {type(pic)}") @@ -210,7 +210,7 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) - overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range of the integer ``dtype``. """ - _log_api_usage_once("torchvision.transforms.functional.convert_image_dtype") + _log_api_usage_once("transforms", "convert_image_dtype") if not isinstance(image, torch.Tensor): raise TypeError("Input img should be Tensor Image") @@ -231,7 +231,7 @@ def to_pil_image(pic, mode=None): Returns: PIL Image: Image converted to PIL Image. """ - _log_api_usage_once("torchvision.transforms.functional.to_pil_image") + _log_api_usage_once("transforms", "to_pil_image") if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): raise TypeError(f"pic should be Tensor or ndarray. 
Got {type(pic)}.") @@ -329,7 +329,7 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool Returns: Tensor: Normalized Tensor image. """ - _log_api_usage_once("torchvision.transforms.functional.normalize") + _log_api_usage_once("transforms", "normalize") if not isinstance(tensor, torch.Tensor): raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.") @@ -409,7 +409,7 @@ def resize( Returns: PIL Image or Tensor: Resized image. """ - _log_api_usage_once("torchvision.transforms.functional.resize") + _log_api_usage_once("transforms", "resize") # Backward compatibility with integer value if isinstance(interpolation, int): warnings.warn( @@ -431,7 +431,7 @@ def resize( def scale(*args, **kwargs): - _log_api_usage_once("torchvision.transforms.functional.scale") + _log_api_usage_once("transforms", "scale") warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") return resize(*args, **kwargs) @@ -477,7 +477,7 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con Returns: PIL Image or Tensor: Padded image. """ - _log_api_usage_once("torchvision.transforms.functional.pad") + _log_api_usage_once("transforms", "pad") if not isinstance(img, torch.Tensor): return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) @@ -501,7 +501,7 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("torchvision.transforms.functional.crop") + _log_api_usage_once("transforms", "crop") if not isinstance(img, torch.Tensor): return F_pil.crop(img, top, left, height, width) @@ -522,7 +522,7 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor: Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("torchvision.transforms.functional.center_crop") + _log_api_usage_once("transforms", "center_crop") if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: @@ -579,7 +579,7 @@ def resized_crop( Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("torchvision.transforms.functional.resized_crop") + _log_api_usage_once("transforms", "resized_crop") img = crop(img, top, left, height, width) img = resize(img, size, interpolation) return img @@ -597,7 +597,7 @@ def hflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Horizontally flipped image. """ - _log_api_usage_once("torchvision.transforms.functional.hflip") + _log_api_usage_once("transforms", "hflip") if not isinstance(img, torch.Tensor): return F_pil.hflip(img) @@ -663,7 +663,7 @@ def perspective( Returns: PIL Image or Tensor: transformed Image. """ - _log_api_usage_once("torchvision.transforms.functional.perspective") + _log_api_usage_once("transforms", "perspective") coeffs = _get_perspective_coeffs(startpoints, endpoints) @@ -697,7 +697,7 @@ def vflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Vertically flipped image. """ - _log_api_usage_once("torchvision.transforms.functional.vflip") + _log_api_usage_once("transforms", "vflip") if not isinstance(img, torch.Tensor): return F_pil.vflip(img) @@ -723,7 +723,7 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. 
""" - _log_api_usage_once("torchvision.transforms.functional.five_crop") + _log_api_usage_once("transforms", "five_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -771,7 +771,7 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[ Corresponding top left, top right, bottom left, bottom right and center crop and same for the flipped image. """ - _log_api_usage_once("torchvision.transforms.functional.ten_crop") + _log_api_usage_once("transforms", "ten_crop") if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -805,7 +805,7 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Brightness adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_brightness") + _log_api_usage_once("transforms", "adjust_brightness") if not isinstance(img, torch.Tensor): return F_pil.adjust_brightness(img, brightness_factor) @@ -826,7 +826,7 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: Returns: PIL Image or Tensor: Contrast adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_contrast") + _log_api_usage_once("transforms", "adjust_contrast") if not isinstance(img, torch.Tensor): return F_pil.adjust_contrast(img, contrast_factor) @@ -847,7 +847,7 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: Returns: PIL Image or Tensor: Saturation adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_saturation") + _log_api_usage_once("transforms", "adjust_saturation") if not isinstance(img, torch.Tensor): return F_pil.adjust_saturation(img, saturation_factor) @@ -882,7 +882,7 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: Returns: PIL Image or Tensor: Hue adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_hue") + _log_api_usage_once("transforms", "adjust_hue") if not isinstance(img, torch.Tensor): return F_pil.adjust_hue(img, hue_factor) @@ -914,7 +914,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: Returns: PIL Image or Tensor: Gamma correction adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_gamma") + _log_api_usage_once("transforms", "adjust_gamma") if not isinstance(img, torch.Tensor): return F_pil.adjust_gamma(img, gamma, gain) @@ -1011,7 +1011,7 @@ def rotate( .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters """ - _log_api_usage_once("torchvision.transforms.functional.rotate") + _log_api_usage_once("transforms", "rotate") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1092,7 +1092,7 @@ def affine( Returns: PIL Image or Tensor: Transformed image. """ - _log_api_usage_once("torchvision.transforms.functional.affine") + _log_api_usage_once("transforms", "affine") if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. 
Please, use interpolation instead" @@ -1177,7 +1177,7 @@ def to_grayscale(img, num_output_channels=1): - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once("torchvision.transforms.functional.to_grayscale") + _log_api_usage_once("transforms", "to_grayscale") if isinstance(img, Image.Image): return F_pil.to_grayscale(img, num_output_channels) @@ -1203,7 +1203,7 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once("torchvision.transforms.functional.rgb_to_grayscale") + _log_api_usage_once("transforms", "rgb_to_grayscale") if not isinstance(img, torch.Tensor): return F_pil.to_grayscale(img, num_output_channels) @@ -1226,7 +1226,7 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool Returns: Tensor Image: Erased image. """ - _log_api_usage_once("torchvision.transforms.functional.erase") + _log_api_usage_once("transforms", "erase") if not isinstance(img, torch.Tensor): raise TypeError(f"img should be Tensor Image. Got {type(img)}") @@ -1263,7 +1263,7 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa Returns: PIL Image or Tensor: Gaussian Blurred version of the image. """ - _log_api_usage_once("torchvision.transforms.functional.gaussian_blur") + _log_api_usage_once("transforms", "gaussian_blur") if not isinstance(kernel_size, (int, list, tuple)): raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}") if isinstance(kernel_size, int): @@ -1315,7 +1315,7 @@ def invert(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Color inverted image. """ - _log_api_usage_once("torchvision.transforms.functional.invert") + _log_api_usage_once("transforms", "invert") if not isinstance(img, torch.Tensor): return F_pil.invert(img) @@ -1335,7 +1335,7 @@ def posterize(img: Tensor, bits: int) -> Tensor: Returns: PIL Image or Tensor: Posterized image. """ - _log_api_usage_once("torchvision.transforms.functional.posterize") + _log_api_usage_once("transforms", "posterize") if not (0 <= bits <= 8): raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") @@ -1357,7 +1357,7 @@ def solarize(img: Tensor, threshold: float) -> Tensor: Returns: PIL Image or Tensor: Solarized image. """ - _log_api_usage_once("torchvision.transforms.functional.solarize") + _log_api_usage_once("transforms", "solarize") if not isinstance(img, torch.Tensor): return F_pil.solarize(img, threshold) @@ -1378,7 +1378,7 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Sharpness adjusted image. """ - _log_api_usage_once("torchvision.transforms.functional.adjust_sharpness") + _log_api_usage_once("transforms", "adjust_sharpness") if not isinstance(img, torch.Tensor): return F_pil.adjust_sharpness(img, sharpness_factor) @@ -1399,7 +1399,7 @@ def autocontrast(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was autocontrasted. """ - _log_api_usage_once("torchvision.transforms.functional.autocontrast") + _log_api_usage_once("transforms", "autocontrast") if not isinstance(img, torch.Tensor): return F_pil.autocontrast(img) @@ -1421,7 +1421,7 @@ def equalize(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was equalized. 
""" - _log_api_usage_once("torchvision.transforms.functional.equalize") + _log_api_usage_once("transforms", "equalize") if not isinstance(img, torch.Tensor): return F_pil.equalize(img) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 37c9a709b25..c7d72ebed8d 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -88,6 +88,7 @@ class Compose: """ def __init__(self, transforms): + _log_api_usage_once("transforms", self.__class__.__name__) self.transforms = transforms def __call__(self, img): @@ -129,6 +130,7 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ + _log_api_usage_once("transforms", self.__class__.__name__) return F.to_tensor(pic) def __repr__(self): @@ -153,6 +155,7 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ + _log_api_usage_once("transforms", self.__class__.__name__) return F.pil_to_tensor(pic) def __repr__(self): @@ -180,6 +183,7 @@ class ConvertImageDtype(torch.nn.Module): def __init__(self, dtype: torch.dtype) -> None: super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) self.dtype = dtype def forward(self, image): @@ -205,6 +209,7 @@ class ToPILImage: """ def __init__(self, mode=None): + _log_api_usage_once("transforms", self.__class__.__name__) self.mode = mode def __call__(self, pic): @@ -246,6 +251,7 @@ class Normalize(torch.nn.Module): def __init__(self, mean, std, inplace=False): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) self.mean = mean self.std = std self.inplace = inplace @@ -310,6 +316,7 @@ class Resize(torch.nn.Module): def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) if not isinstance(size, (int, Sequence)): raise TypeError(f"Size should be int or sequence. 
Got {type(size)}") if isinstance(size, Sequence) and len(size) not in (1, 2): @@ -351,6 +358,7 @@ class Scale(Resize): def __init__(self, *args, **kwargs): warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") super().__init__(*args, **kwargs) + _log_api_usage_once("transforms", self.__class__.__name__) class CenterCrop(torch.nn.Module): @@ -367,6 +375,7 @@ class CenterCrop(torch.nn.Module): def __init__(self, size): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def forward(self, img): @@ -423,6 +432,7 @@ class Pad(torch.nn.Module): def __init__(self, padding, fill=0, padding_mode="constant"): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) if not isinstance(padding, (numbers.Number, tuple, list)): raise TypeError("Got inappropriate padding arg") @@ -463,7 +473,7 @@ class Lambda: """ def __init__(self, lambd): - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if not callable(lambd): raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}") self.lambd = lambd @@ -483,7 +493,7 @@ class RandomTransforms: """ def __init__(self, transforms): - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence") self.transforms = transforms @@ -522,7 +532,7 @@ class RandomApply(torch.nn.Module): def __init__(self, transforms, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.transforms = transforms self.p = p @@ -643,7 +653,7 @@ def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")) @@ -693,7 +703,7 @@ class RandomHorizontalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): @@ -724,7 +734,7 @@ class RandomVerticalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): @@ -762,7 +772,7 @@ class RandomPerspective(torch.nn.Module): def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p # Backward compatibility with integer value @@ -875,7 +885,7 @@ class RandomResizedCrop(torch.nn.Module): def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation=InterpolationMode.BILINEAR): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") if not isinstance(scale, Sequence): @@ -972,6 +982,7 @@ def __init__(self, *args, **kwargs): + "please use transforms.RandomResizedCrop instead." 
) super().__init__(*args, **kwargs) + _log_api_usage_once("transforms", self.__class__.__name__) class FiveCrop(torch.nn.Module): @@ -1004,6 +1015,7 @@ class FiveCrop(torch.nn.Module): def __init__(self, size): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def forward(self, img): @@ -1052,6 +1064,7 @@ class TenCrop(torch.nn.Module): def __init__(self, size, vertical_flip=False): super().__init__() + _log_api_usage_once("transforms", self.__class__.__name__) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") self.vertical_flip = vertical_flip @@ -1090,7 +1103,7 @@ class LinearTransformation(torch.nn.Module): def __init__(self, transformation_matrix, mean_vector): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if transformation_matrix.size(0) != transformation_matrix.size(1): raise ValueError( "transformation_matrix should be square. Got " @@ -1169,7 +1182,7 @@ class ColorJitter(torch.nn.Module): def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.brightness = self._check_input(brightness, "brightness") self.contrast = self._check_input(contrast, "contrast") self.saturation = self._check_input(saturation, "saturation") @@ -1292,7 +1305,7 @@ def __init__( self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None ): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1413,7 +1426,7 @@ def __init__( resample=None, ): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. 
Please, use interpolation instead" @@ -1558,7 +1571,7 @@ class Grayscale(torch.nn.Module): def __init__(self, num_output_channels=1): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.num_output_channels = num_output_channels def forward(self, img): @@ -1593,7 +1606,7 @@ class RandomGrayscale(torch.nn.Module): def __init__(self, p=0.1): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): @@ -1643,7 +1656,7 @@ class RandomErasing(torch.nn.Module): def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) if not isinstance(value, (numbers.Number, str, tuple, list)): raise TypeError("Argument value should be either a number or str or a sequence") if isinstance(value, str) and value != "random": @@ -1767,7 +1780,7 @@ class GaussianBlur(torch.nn.Module): def __init__(self, kernel_size, sigma=(0.1, 2.0)): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers") for ks in self.kernel_size: if ks <= 0 or ks % 2 == 0: @@ -1859,7 +1872,7 @@ class RandomInvert(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): @@ -1891,7 +1904,7 @@ class RandomPosterize(torch.nn.Module): def __init__(self, bits, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.bits = bits self.p = p @@ -1924,7 +1937,7 @@ class RandomSolarize(torch.nn.Module): def __init__(self, threshold, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.threshold = threshold self.p = p @@ -1957,7 +1970,7 @@ class RandomAdjustSharpness(torch.nn.Module): def __init__(self, sharpness_factor, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.sharpness_factor = sharpness_factor self.p = p @@ -1989,7 +2002,7 @@ class RandomAutocontrast(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): @@ -2020,7 +2033,7 @@ class RandomEqualize(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once(self) + _log_api_usage_once("transforms", self.__class__.__name__) self.p = p def forward(self, img): From d0b9bf5af6b37a483a6ca5266a8d5d693368f76d Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Fri, 10 Dec 2021 07:21:33 +0000 Subject: [PATCH 6/9] torchscriptbility --- torchvision/transforms/transforms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index c7d72ebed8d..371b043f9a0 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -88,7 +88,7 @@ class Compose: """ def __init__(self, transforms): - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once("transforms", "Compose") self.transforms = transforms def __call__(self, img): From 
b209d85231767bf09a1cf662f42fcdeed885c80c Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 20 Dec 2021 19:35:52 +0000 Subject: [PATCH 7/9] adopt new API --- torchvision/transforms/functional.py | 105 ++++++++++++++++++--------- torchvision/transforms/transforms.py | 70 +++++++++--------- 2 files changed, 105 insertions(+), 70 deletions(-) diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py index 99f4ce7e203..7d7d5382291 100644 --- a/torchvision/transforms/functional.py +++ b/torchvision/transforms/functional.py @@ -68,7 +68,8 @@ def get_image_size(img: Tensor) -> List[int]: Returns: List[int]: The image size. """ - _log_api_usage_once("transforms", "get_image_size") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(get_image_size) if isinstance(img, torch.Tensor): return F_t.get_image_size(img) @@ -84,7 +85,8 @@ def get_image_num_channels(img: Tensor) -> int: Returns: int: The number of channels. """ - _log_api_usage_once("transforms", "get_image_num_channels") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(get_image_num_channels) if isinstance(img, torch.Tensor): return F_t.get_image_num_channels(img) @@ -113,7 +115,8 @@ def to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("transforms", "to_tensor") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_tensor) if not (F_pil._is_pil_image(pic) or _is_numpy(pic)): raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}") @@ -170,7 +173,8 @@ def pil_to_tensor(pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("transforms", "pil_to_tensor") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(pil_to_tensor) if not F_pil._is_pil_image(pic): raise TypeError(f"pic should be PIL Image. Got {type(pic)}") @@ -210,7 +214,8 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) - overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range of the integer ``dtype``. """ - _log_api_usage_once("transforms", "convert_image_dtype") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(convert_image_dtype) if not isinstance(image, torch.Tensor): raise TypeError("Input img should be Tensor Image") @@ -231,7 +236,8 @@ def to_pil_image(pic, mode=None): Returns: PIL Image: Image converted to PIL Image. """ - _log_api_usage_once("transforms", "to_pil_image") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_pil_image) if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.") @@ -329,7 +335,8 @@ def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool Returns: Tensor: Normalized Tensor image. """ - _log_api_usage_once("transforms", "normalize") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(normalize) if not isinstance(tensor, torch.Tensor): raise TypeError(f"Input tensor should be a torch tensor. Got {type(tensor)}.") @@ -409,7 +416,8 @@ def resize( Returns: PIL Image or Tensor: Resized image. 
""" - _log_api_usage_once("transforms", "resize") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(resize) # Backward compatibility with integer value if isinstance(interpolation, int): warnings.warn( @@ -431,7 +439,8 @@ def resize( def scale(*args, **kwargs): - _log_api_usage_once("transforms", "scale") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(scale) warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") return resize(*args, **kwargs) @@ -477,7 +486,8 @@ def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "con Returns: PIL Image or Tensor: Padded image. """ - _log_api_usage_once("transforms", "pad") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(pad) if not isinstance(img, torch.Tensor): return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) @@ -501,7 +511,8 @@ def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("transforms", "crop") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(crop) if not isinstance(img, torch.Tensor): return F_pil.crop(img, top, left, height, width) @@ -522,7 +533,8 @@ def center_crop(img: Tensor, output_size: List[int]) -> Tensor: Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("transforms", "center_crop") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(center_crop) if isinstance(output_size, numbers.Number): output_size = (int(output_size), int(output_size)) elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: @@ -579,7 +591,8 @@ def resized_crop( Returns: PIL Image or Tensor: Cropped image. """ - _log_api_usage_once("transforms", "resized_crop") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(resized_crop) img = crop(img, top, left, height, width) img = resize(img, size, interpolation) return img @@ -597,7 +610,8 @@ def hflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Horizontally flipped image. """ - _log_api_usage_once("transforms", "hflip") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(hflip) if not isinstance(img, torch.Tensor): return F_pil.hflip(img) @@ -663,7 +677,8 @@ def perspective( Returns: PIL Image or Tensor: transformed Image. """ - _log_api_usage_once("transforms", "perspective") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(perspective) coeffs = _get_perspective_coeffs(startpoints, endpoints) @@ -697,7 +712,8 @@ def vflip(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Vertically flipped image. """ - _log_api_usage_once("transforms", "vflip") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(vflip) if not isinstance(img, torch.Tensor): return F_pil.vflip(img) @@ -723,7 +739,8 @@ def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Ten tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. 
""" - _log_api_usage_once("transforms", "five_crop") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(five_crop) if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -771,7 +788,8 @@ def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[ Corresponding top left, top right, bottom left, bottom right and center crop and same for the flipped image. """ - _log_api_usage_once("transforms", "ten_crop") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(ten_crop) if isinstance(size, numbers.Number): size = (int(size), int(size)) elif isinstance(size, (tuple, list)) and len(size) == 1: @@ -805,7 +823,8 @@ def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Brightness adjusted image. """ - _log_api_usage_once("transforms", "adjust_brightness") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_brightness) if not isinstance(img, torch.Tensor): return F_pil.adjust_brightness(img, brightness_factor) @@ -826,7 +845,8 @@ def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: Returns: PIL Image or Tensor: Contrast adjusted image. """ - _log_api_usage_once("transforms", "adjust_contrast") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_contrast) if not isinstance(img, torch.Tensor): return F_pil.adjust_contrast(img, contrast_factor) @@ -847,7 +867,8 @@ def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: Returns: PIL Image or Tensor: Saturation adjusted image. """ - _log_api_usage_once("transforms", "adjust_saturation") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_saturation) if not isinstance(img, torch.Tensor): return F_pil.adjust_saturation(img, saturation_factor) @@ -882,7 +903,8 @@ def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: Returns: PIL Image or Tensor: Hue adjusted image. """ - _log_api_usage_once("transforms", "adjust_hue") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_hue) if not isinstance(img, torch.Tensor): return F_pil.adjust_hue(img, hue_factor) @@ -914,7 +936,8 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: Returns: PIL Image or Tensor: Gamma correction adjusted image. """ - _log_api_usage_once("transforms", "adjust_gamma") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_gamma) if not isinstance(img, torch.Tensor): return F_pil.adjust_gamma(img, gamma, gain) @@ -1011,7 +1034,8 @@ def rotate( .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters """ - _log_api_usage_once("transforms", "rotate") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(rotate) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1092,7 +1116,8 @@ def affine( Returns: PIL Image or Tensor: Transformed image. """ - _log_api_usage_once("transforms", "affine") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(affine) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. 
Please, use interpolation instead" @@ -1177,7 +1202,8 @@ def to_grayscale(img, num_output_channels=1): - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once("transforms", "to_grayscale") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(to_grayscale) if isinstance(img, Image.Image): return F_pil.to_grayscale(img, num_output_channels) @@ -1203,7 +1229,8 @@ def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: - if num_output_channels = 1 : returned image is single channel - if num_output_channels = 3 : returned image is 3 channel with r = g = b """ - _log_api_usage_once("transforms", "rgb_to_grayscale") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(rgb_to_grayscale) if not isinstance(img, torch.Tensor): return F_pil.to_grayscale(img, num_output_channels) @@ -1226,7 +1253,8 @@ def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool Returns: Tensor Image: Erased image. """ - _log_api_usage_once("transforms", "erase") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(erase) if not isinstance(img, torch.Tensor): raise TypeError(f"img should be Tensor Image. Got {type(img)}") @@ -1263,7 +1291,8 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[floa Returns: PIL Image or Tensor: Gaussian Blurred version of the image. """ - _log_api_usage_once("transforms", "gaussian_blur") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(gaussian_blur) if not isinstance(kernel_size, (int, list, tuple)): raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}") if isinstance(kernel_size, int): @@ -1315,7 +1344,8 @@ def invert(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: Color inverted image. """ - _log_api_usage_once("transforms", "invert") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(invert) if not isinstance(img, torch.Tensor): return F_pil.invert(img) @@ -1335,7 +1365,8 @@ def posterize(img: Tensor, bits: int) -> Tensor: Returns: PIL Image or Tensor: Posterized image. """ - _log_api_usage_once("transforms", "posterize") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(posterize) if not (0 <= bits <= 8): raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}") @@ -1357,7 +1388,8 @@ def solarize(img: Tensor, threshold: float) -> Tensor: Returns: PIL Image or Tensor: Solarized image. """ - _log_api_usage_once("transforms", "solarize") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(solarize) if not isinstance(img, torch.Tensor): return F_pil.solarize(img, threshold) @@ -1378,7 +1410,8 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: Returns: PIL Image or Tensor: Sharpness adjusted image. """ - _log_api_usage_once("transforms", "adjust_sharpness") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(adjust_sharpness) if not isinstance(img, torch.Tensor): return F_pil.adjust_sharpness(img, sharpness_factor) @@ -1399,7 +1432,8 @@ def autocontrast(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was autocontrasted. 
""" - _log_api_usage_once("transforms", "autocontrast") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(autocontrast) if not isinstance(img, torch.Tensor): return F_pil.autocontrast(img) @@ -1421,7 +1455,8 @@ def equalize(img: Tensor) -> Tensor: Returns: PIL Image or Tensor: An image that was equalized. """ - _log_api_usage_once("transforms", "equalize") + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(equalize) if not isinstance(img, torch.Tensor): return F_pil.equalize(img) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index 371b043f9a0..cd988d410c4 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -88,7 +88,7 @@ class Compose: """ def __init__(self, transforms): - _log_api_usage_once("transforms", "Compose") + _log_api_usage_once(self) self.transforms = transforms def __call__(self, img): @@ -130,7 +130,7 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) return F.to_tensor(pic) def __repr__(self): @@ -155,7 +155,7 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) return F.pil_to_tensor(pic) def __repr__(self): @@ -183,7 +183,7 @@ class ConvertImageDtype(torch.nn.Module): def __init__(self, dtype: torch.dtype) -> None: super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.dtype = dtype def forward(self, image): @@ -209,7 +209,7 @@ class ToPILImage: """ def __init__(self, mode=None): - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.mode = mode def __call__(self, pic): @@ -251,7 +251,7 @@ class Normalize(torch.nn.Module): def __init__(self, mean, std, inplace=False): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.mean = mean self.std = std self.inplace = inplace @@ -316,7 +316,7 @@ class Resize(torch.nn.Module): def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if not isinstance(size, (int, Sequence)): raise TypeError(f"Size should be int or sequence. 
Got {type(size)}") if isinstance(size, Sequence) and len(size) not in (1, 2): @@ -358,7 +358,7 @@ class Scale(Resize): def __init__(self, *args, **kwargs): warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.") super().__init__(*args, **kwargs) - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) class CenterCrop(torch.nn.Module): @@ -375,7 +375,7 @@ class CenterCrop(torch.nn.Module): def __init__(self, size): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def forward(self, img): @@ -432,7 +432,7 @@ class Pad(torch.nn.Module): def __init__(self, padding, fill=0, padding_mode="constant"): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if not isinstance(padding, (numbers.Number, tuple, list)): raise TypeError("Got inappropriate padding arg") @@ -473,7 +473,7 @@ class Lambda: """ def __init__(self, lambd): - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if not callable(lambd): raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}") self.lambd = lambd @@ -493,7 +493,7 @@ class RandomTransforms: """ def __init__(self, transforms): - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence") self.transforms = transforms @@ -532,7 +532,7 @@ class RandomApply(torch.nn.Module): def __init__(self, transforms, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.transforms = transforms self.p = p @@ -653,7 +653,7 @@ def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")) @@ -703,7 +703,7 @@ class RandomHorizontalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -734,7 +734,7 @@ class RandomVerticalFlip(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -772,7 +772,7 @@ class RandomPerspective(torch.nn.Module): def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p # Backward compatibility with integer value @@ -885,7 +885,7 @@ class RandomResizedCrop(torch.nn.Module): def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation=InterpolationMode.BILINEAR): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") if not isinstance(scale, Sequence): @@ -982,7 +982,7 @@ def __init__(self, *args, 
**kwargs): + "please use transforms.RandomResizedCrop instead." ) super().__init__(*args, **kwargs) - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) class FiveCrop(torch.nn.Module): @@ -1015,7 +1015,7 @@ class FiveCrop(torch.nn.Module): def __init__(self, size): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def forward(self, img): @@ -1064,7 +1064,7 @@ class TenCrop(torch.nn.Module): def __init__(self, size, vertical_flip=False): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") self.vertical_flip = vertical_flip @@ -1103,7 +1103,7 @@ class LinearTransformation(torch.nn.Module): def __init__(self, transformation_matrix, mean_vector): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if transformation_matrix.size(0) != transformation_matrix.size(1): raise ValueError( "transformation_matrix should be square. Got " @@ -1182,7 +1182,7 @@ class ColorJitter(torch.nn.Module): def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.brightness = self._check_input(brightness, "brightness") self.contrast = self._check_input(contrast, "contrast") self.saturation = self._check_input(saturation, "saturation") @@ -1305,7 +1305,7 @@ def __init__( self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None ): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" @@ -1426,7 +1426,7 @@ def __init__( resample=None, ): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if resample is not None: warnings.warn( "Argument resample is deprecated and will be removed since v0.10.0. 
Please, use interpolation instead" @@ -1571,7 +1571,7 @@ class Grayscale(torch.nn.Module): def __init__(self, num_output_channels=1): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.num_output_channels = num_output_channels def forward(self, img): @@ -1606,7 +1606,7 @@ class RandomGrayscale(torch.nn.Module): def __init__(self, p=0.1): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -1656,7 +1656,7 @@ class RandomErasing(torch.nn.Module): def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) if not isinstance(value, (numbers.Number, str, tuple, list)): raise TypeError("Argument value should be either a number or str or a sequence") if isinstance(value, str) and value != "random": @@ -1780,7 +1780,7 @@ class GaussianBlur(torch.nn.Module): def __init__(self, kernel_size, sigma=(0.1, 2.0)): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers") for ks in self.kernel_size: if ks <= 0 or ks % 2 == 0: @@ -1872,7 +1872,7 @@ class RandomInvert(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -1904,7 +1904,7 @@ class RandomPosterize(torch.nn.Module): def __init__(self, bits, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.bits = bits self.p = p @@ -1937,7 +1937,7 @@ class RandomSolarize(torch.nn.Module): def __init__(self, threshold, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.threshold = threshold self.p = p @@ -1970,7 +1970,7 @@ class RandomAdjustSharpness(torch.nn.Module): def __init__(self, sharpness_factor, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.sharpness_factor = sharpness_factor self.p = p @@ -2002,7 +2002,7 @@ class RandomAutocontrast(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): @@ -2033,7 +2033,7 @@ class RandomEqualize(torch.nn.Module): def __init__(self, p=0.5): super().__init__() - _log_api_usage_once("transforms", self.__class__.__name__) + _log_api_usage_once(self) self.p = p def forward(self, img): From 06a93c357169d5e9290f0ef604c836b83ee70d69 Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Mon, 20 Dec 2021 22:15:43 +0000 Subject: [PATCH 8/9] make Compose scriptable --- torchvision/transforms/transforms.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index cd988d410c4..fc7b1bd8ba8 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -88,7 +88,8 @@ class Compose: """ def __init__(self, transforms): - _log_api_usage_once(self) + if not torch.jit.is_scripting() and not torch.jit.is_tracing(): + _log_api_usage_once(self) self.transforms = transforms def __call__(self, img): From 
a84e56ad37b000f581da2e41d0cd08550e08530a Mon Sep 17 00:00:00 2001 From: Kai Zhang Date: Tue, 21 Dec 2021 01:28:42 +0000 Subject: [PATCH 9/9] move from __call__ to __init__ --- torchvision/transforms/transforms.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index fc7b1bd8ba8..5cacecab625 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -123,6 +123,9 @@ class ToTensor: .. _references: https://github.com/pytorch/vision/tree/main/references/segmentation """ + def __init__(self) -> None: + _log_api_usage_once(self) + def __call__(self, pic): """ Args: @@ -131,7 +134,6 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ - _log_api_usage_once(self) return F.to_tensor(pic) def __repr__(self): @@ -144,6 +146,9 @@ class PILToTensor: Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W). """ + def __init__(self) -> None: + _log_api_usage_once(self) + def __call__(self, pic): """ .. note:: @@ -156,7 +161,6 @@ def __call__(self, pic): Returns: Tensor: Converted image. """ - _log_api_usage_once(self) return F.pil_to_tensor(pic) def __repr__(self):
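
Taken together, the series converges on a single pattern: functional ops call _log_api_usage_once(fn) behind a torch.jit.is_scripting()/torch.jit.is_tracing() guard so TorchScript compilation and tracing never see the logging helper, while class-based transforms log once from __init__ (moved out of __call__ for ToTensor and PILToTensor in the last patch) so construction, not every forward pass, emits the event. The sketch below is a minimal illustration of that end state applied to a hypothetical op and transform; my_custom_op and MyCustomTransform are made-up names, not part of these patches, and _log_api_usage_once is torchvision's internal helper imported the same way the diffs above do (torchvision.utils).

    # Minimal sketch of the pattern this patch series lands on.
    # `my_custom_op` and `MyCustomTransform` are hypothetical, for illustration only.
    import torch
    from torch import Tensor
    from torchvision.utils import _log_api_usage_once


    def my_custom_op(img: Tensor) -> Tensor:
        # Functional ops guard the call so scripting/tracing never has to
        # compile the (unscriptable) logging helper.
        if not torch.jit.is_scripting() and not torch.jit.is_tracing():
            _log_api_usage_once(my_custom_op)
        return img.flip([-1])  # placeholder work: horizontal flip


    class MyCustomTransform(torch.nn.Module):
        def __init__(self, p: float = 0.5) -> None:
            super().__init__()
            # Class-based transforms log once at construction time, so repeated
            # calls inside a training loop do not re-emit the usage event.
            _log_api_usage_once(self)
            self.p = p

        def forward(self, img: Tensor) -> Tensor:
            return my_custom_op(img)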