From fe66c0a109c5037cc9950fe37dda723310afd221 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Mon, 14 Feb 2022 08:22:21 +0100
Subject: [PATCH 1/2] add all legacy kernels

---
 .../prototype/transforms/functional/_color.py | 24 +++++++
 .../transforms/functional/_geometry.py        | 72 +++++++++++++++++++
 .../prototype/transforms/functional/_misc.py  | 13 ++++
 .../prototype/transforms/kernels/__init__.py  | 10 ++-
 .../prototype/transforms/kernels/_color.py    |  2 +
 .../prototype/transforms/kernels/_geometry.py |  6 ++
 .../prototype/transforms/kernels/_misc.py     |  1 +
 7 files changed, 127 insertions(+), 1 deletion(-)

diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index 23e128b7856..fce44e62ca9 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -117,3 +117,27 @@ def equalize(input: T, *args: Any, **kwargs: Any) -> T:
 def invert(input: T, *args: Any, **kwargs: Any) -> T:
     """ADDME"""
     ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.adjust_hue,
+        PIL.Image.Image: _F.adjust_hue,
+        features.Image: K.adjust_hue_image,
+    }
+)
+def adjust_hue(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.adjust_gamma,
+        PIL.Image.Image: _F.adjust_gamma,
+        features.Image: K.adjust_gamma_image,
+    }
+)
+def adjust_gamma(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py
index 147baa3a066..323234a9f7f 100644
--- a/torchvision/prototype/transforms/functional/_geometry.py
+++ b/torchvision/prototype/transforms/functional/_geometry.py
@@ -93,3 +93,75 @@ def affine(input: T, *args: Any, **kwargs: Any) -> T:
 def rotate(input: T, *args: Any, **kwargs: Any) -> T:
     """ADDME"""
     ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.pad,
+        PIL.Image.Image: _F.pad,
+        features.Image: K.pad_image,
+    }
+)
+def pad(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.crop,
+        PIL.Image.Image: _F.crop,
+        features.Image: K.crop_image,
+    }
+)
+def crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.perspective,
+        PIL.Image.Image: _F.perspective,
+        features.Image: K.perspective_image,
+    }
+)
+def perspective(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.vflip,
+        PIL.Image.Image: _F.vflip,
+        features.Image: K.vertical_flip_image,
+    }
+)
+def vertical_flip(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.five_crop,
+        PIL.Image.Image: _F.five_crop,
+        features.Image: K.five_crop_image,
+    }
+)
+def five_crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.ten_crop,
+        PIL.Image.Image: _F.ten_crop,
+        features.Image: K.ten_crop_image,
+    }
+)
+def ten_crop(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
diff --git a/torchvision/prototype/transforms/functional/_misc.py b/torchvision/prototype/transforms/functional/_misc.py
index 7cf0765105a..fd262cb855b 100644
--- a/torchvision/prototype/transforms/functional/_misc.py
+++ b/torchvision/prototype/transforms/functional/_misc.py
@@ -1,5 +1,6 @@
 from typing import TypeVar, Any
 
+import PIL.Image
 import torch
 from torchvision.prototype import features
 from torchvision.prototype.transforms import kernels as K
@@ -19,3 +20,15 @@
 def normalize(input: T, *args: Any, **kwargs: Any) -> T:
     """ADDME"""
     ...
+
+
+@dispatch(
+    {
+        torch.Tensor: _F.gaussian_blur,
+        PIL.Image.Image: _F.gaussian_blur,
+        features.Image: K.gaussian_blur_image,
+    }
+)
+def gaussian_blur(input: T, *args: Any, **kwargs: Any) -> T:
+    """ADDME"""
+    ...
diff --git a/torchvision/prototype/transforms/kernels/__init__.py b/torchvision/prototype/transforms/kernels/__init__.py
index 6f74f6af0e9..1cac91d29c1 100644
--- a/torchvision/prototype/transforms/kernels/__init__.py
+++ b/torchvision/prototype/transforms/kernels/__init__.py
@@ -18,6 +18,8 @@
     autocontrast_image,
     equalize_image,
     invert_image,
+    adjust_hue_image,
+    adjust_gamma_image,
 )
 from ._geometry import (
     horizontal_flip_bounding_box,
@@ -29,6 +31,12 @@
     resized_crop_image,
     affine_image,
     rotate_image,
+    pad_image,
+    crop_image,
+    perspective_image,
+    vertical_flip_image,
+    five_crop_image,
+    ten_crop_image,
 )
-from ._misc import normalize_image
+from ._misc import normalize_image, gaussian_blur_image
 from ._type_conversion import decode_image_with_pil, decode_video_with_av, label_to_one_hot
diff --git a/torchvision/prototype/transforms/kernels/_color.py b/torchvision/prototype/transforms/kernels/_color.py
index 0d828e6d169..00ed5cfbfc7 100644
--- a/torchvision/prototype/transforms/kernels/_color.py
+++ b/torchvision/prototype/transforms/kernels/_color.py
@@ -10,3 +10,5 @@
 autocontrast_image = _F.autocontrast
 equalize_image = _F.equalize
 invert_image = _F.invert
+adjust_hue_image = _F.adjust_hue
+adjust_gamma_image = _F.adjust_gamma
diff --git a/torchvision/prototype/transforms/kernels/_geometry.py b/torchvision/prototype/transforms/kernels/_geometry.py
index fb25f0fdf47..72afc2e62a3 100644
--- a/torchvision/prototype/transforms/kernels/_geometry.py
+++ b/torchvision/prototype/transforms/kernels/_geometry.py
@@ -68,3 +68,9 @@ def resize_bounding_box(bounding_box: torch.Tensor, *, size: List[int], image_si
 resized_crop_image = _F.resized_crop
 affine_image = _F.affine
 rotate_image = _F.rotate
+pad_image = _F.pad
+crop_image = _F.crop
+perspective_image = _F.perspective
+vertical_flip_image = _F.vflip
+five_crop_image = _F.five_crop
+ten_crop_image = _F.ten_crop
diff --git a/torchvision/prototype/transforms/kernels/_misc.py b/torchvision/prototype/transforms/kernels/_misc.py
index de148ab194a..f4e2c69c7ee 100644
--- a/torchvision/prototype/transforms/kernels/_misc.py
+++ b/torchvision/prototype/transforms/kernels/_misc.py
@@ -2,3 +2,4 @@
 
 
 normalize_image = _F.normalize
+gaussian_blur_image = _F.gaussian_blur

From 5e5c602597a3af1c52247649cee1e2208397a1b7 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Mon, 14 Feb 2022 08:24:10 +0100
Subject: [PATCH 2/2] clarify dispatcher docstrings

---
 .../transforms/functional/_augment.py         |  4 ++--
 .../prototype/transforms/functional/_color.py | 22 ++++++++---------
 .../transforms/functional/_geometry.py        | 24 +++++++++----------
 .../prototype/transforms/functional/_misc.py  |  4 ++--
 4 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/torchvision/prototype/transforms/functional/_augment.py b/torchvision/prototype/transforms/functional/_augment.py
index 2eafe0d3c1f..e5e93aa0b4f 100644
--- a/torchvision/prototype/transforms/functional/_augment.py
+++ b/torchvision/prototype/transforms/functional/_augment.py
@@ -17,7 +17,7 @@
     }
 )
 def erase(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -28,7 +28,7 @@ def erase(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def mixup(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index fce44e62ca9..290ae7094ce 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -19,7 +19,7 @@
     }
 )
 def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -31,7 +31,7 @@ def adjust_brightness(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -43,7 +43,7 @@ def adjust_saturation(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -55,7 +55,7 @@ def adjust_contrast(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -67,7 +67,7 @@ def adjust_sharpness(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def posterize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -79,7 +79,7 @@ def posterize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def solarize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -91,7 +91,7 @@ def solarize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def autocontrast(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -103,7 +103,7 @@ def autocontrast(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def equalize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -115,7 +115,7 @@ def equalize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def invert(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -127,7 +127,7 @@ def invert(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_hue(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -139,5 +139,5 @@ def adjust_hue(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def adjust_gamma(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py
index 323234a9f7f..ae930bfc5f1 100644
--- a/torchvision/prototype/transforms/functional/_geometry.py
+++ b/torchvision/prototype/transforms/functional/_geometry.py
@@ -20,7 +20,7 @@
     },
 )
 def horizontal_flip(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     if isinstance(input, features.BoundingBox):
         output = K.horizontal_flip_bounding_box(input, format=input.format, image_size=input.image_size)
         return cast(T, features.BoundingBox.new_like(input, output))
@@ -38,7 +38,7 @@
     }
 )
 def resize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     if isinstance(input, features.BoundingBox):
         size = kwargs.pop("size")
         output = K.resize_bounding_box(input, size=size, image_size=input.image_size)
@@ -55,7 +55,7 @@ def resize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def center_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -67,7 +67,7 @@ def center_crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def resized_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -79,7 +79,7 @@ def resized_crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def affine(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -91,7 +91,7 @@ def affine(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def rotate(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -103,7 +103,7 @@ def rotate(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def pad(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -115,7 +115,7 @@ def pad(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -127,7 +127,7 @@ def crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def perspective(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -139,7 +139,7 @@ def perspective(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def vertical_flip(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -151,7 +151,7 @@ def vertical_flip(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def five_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -163,5 +163,5 @@ def five_crop(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def ten_crop(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
diff --git a/torchvision/prototype/transforms/functional/_misc.py b/torchvision/prototype/transforms/functional/_misc.py
index fd262cb855b..45e1bdefd3d 100644
--- a/torchvision/prototype/transforms/functional/_misc.py
+++ b/torchvision/prototype/transforms/functional/_misc.py
@@ -18,7 +18,7 @@
     }
 )
 def normalize(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
     ...
 
 
@@ -30,5 +30,5 @@ def normalize(input: T, *args: Any, **kwargs: Any) -> T:
     }
 )
 def gaussian_blur(input: T, *args: Any, **kwargs: Any) -> T:
-    """ADDME"""
+    """TODO: add docstring"""
    ...
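
Reviewer note (not part of the patches above): a minimal usage sketch of what the new dispatchers are meant to enable, assuming the prototype package layout shown in the diffs. Constructing a features.Image directly from a plain tensor and the specific gamma value are illustrative assumptions, not something this change defines.

    import PIL.Image
    import torch

    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F

    # the three input types registered with the adjust_gamma dispatcher
    img_tensor = torch.rand(3, 32, 32)                    # plain tensor -> legacy _F.adjust_gamma
    img_pil = PIL.Image.new("RGB", (32, 32))              # PIL image    -> legacy _F.adjust_gamma
    img_feature = features.Image(torch.rand(3, 32, 32))   # prototype feature -> K.adjust_gamma_image

    for img in (img_tensor, img_pil, img_feature):
        out = F.adjust_gamma(img, gamma=0.8)
        # the dispatcher routes to the kernel registered for type(input); per the
        # T -> T annotations in the patch, the output keeps the input's type
        print(type(out))

The same pattern applies to every dispatcher added in PATCH 1 (adjust_hue, pad, crop, perspective, vertical_flip, five_crop, ten_crop, gaussian_blur): plain tensors and PIL images fall back to the legacy torchvision.transforms.functional ops, while feature types go through the corresponding kernels.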