Remove private PIL kernels from __init__.py (#8329)
NicolasHug committed Mar 18, 2024
1 parent 924b162 commit 9cb639a
Showing 2 changed files with 35 additions and 65 deletions.
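
In practice, this change means that anything which previously reached the private PIL kernels through the top-level `torchvision.transforms.v2.functional` namespace now has to go through the defining submodule (or, better, the public dispatcher). A minimal sketch of the before/after using `resize`; the input image and size are arbitrary illustration values:

```python
import PIL.Image

from torchvision.transforms.v2 import functional as F

img = PIL.Image.new("RGB", (32, 32))

# Preferred, public entry point: the dispatcher routes PIL inputs to the PIL kernel.
out = F.resize(img, size=[16, 16])

# Before this commit the private kernel was re-exported at the top level:
#   F._resize_image_pil(img, size=[16, 16])
# After this commit it is only reachable via its defining submodule
# (still private API; the test suite now references it this way):
out = F._geometry._resize_image_pil(img, size=[16, 16])
print(out.size)  # (16, 16)
```
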
68 changes: 34 additions & 34 deletions test/test_transforms_v2.py
@@ -668,7 +668,7 @@ def test_functional(self, size, make_input):
("kernel", "input_type"),
[
(F.resize_image, torch.Tensor),
- (F._resize_image_pil, PIL.Image.Image),
+ (F._geometry._resize_image_pil, PIL.Image.Image),
(F.resize_image, tv_tensors.Image),
(F.resize_bounding_boxes, tv_tensors.BoundingBoxes),
(F.resize_mask, tv_tensors.Mask),
@@ -986,7 +986,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.horizontal_flip_image, torch.Tensor),
- (F._horizontal_flip_image_pil, PIL.Image.Image),
+ (F._geometry._horizontal_flip_image_pil, PIL.Image.Image),
(F.horizontal_flip_image, tv_tensors.Image),
(F.horizontal_flip_bounding_boxes, tv_tensors.BoundingBoxes),
(F.horizontal_flip_mask, tv_tensors.Mask),
@@ -1154,7 +1154,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.affine_image, torch.Tensor),
- (F._affine_image_pil, PIL.Image.Image),
+ (F._geometry._affine_image_pil, PIL.Image.Image),
(F.affine_image, tv_tensors.Image),
(F.affine_bounding_boxes, tv_tensors.BoundingBoxes),
(F.affine_mask, tv_tensors.Mask),
@@ -1436,7 +1436,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.vertical_flip_image, torch.Tensor),
- (F._vertical_flip_image_pil, PIL.Image.Image),
+ (F._geometry._vertical_flip_image_pil, PIL.Image.Image),
(F.vertical_flip_image, tv_tensors.Image),
(F.vertical_flip_bounding_boxes, tv_tensors.BoundingBoxes),
(F.vertical_flip_mask, tv_tensors.Mask),
@@ -1578,7 +1578,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.rotate_image, torch.Tensor),
- (F._rotate_image_pil, PIL.Image.Image),
+ (F._geometry._rotate_image_pil, PIL.Image.Image),
(F.rotate_image, tv_tensors.Image),
(F.rotate_bounding_boxes, tv_tensors.BoundingBoxes),
(F.rotate_mask, tv_tensors.Mask),
@@ -2149,7 +2149,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_brightness_image, torch.Tensor),
- (F._adjust_brightness_image_pil, PIL.Image.Image),
+ (F._color._adjust_brightness_image_pil, PIL.Image.Image),
(F.adjust_brightness_image, tv_tensors.Image),
(F.adjust_brightness_video, tv_tensors.Video),
],
@@ -2311,7 +2311,7 @@ class TestShapeGetters:
("kernel", "make_input"),
[
(F.get_dimensions_image, make_image_tensor),
- (F._get_dimensions_image_pil, make_image_pil),
+ (F._meta._get_dimensions_image_pil, make_image_pil),
(F.get_dimensions_image, make_image),
(F.get_dimensions_video, make_video),
],
@@ -2328,7 +2328,7 @@ def test_get_dimensions(self, kernel, make_input):
("kernel", "make_input"),
[
(F.get_num_channels_image, make_image_tensor),
- (F._get_num_channels_image_pil, make_image_pil),
+ (F._meta._get_num_channels_image_pil, make_image_pil),
(F.get_num_channels_image, make_image),
(F.get_num_channels_video, make_video),
],
@@ -2344,7 +2344,7 @@ def test_get_num_channels(self, kernel, make_input):
("kernel", "make_input"),
[
(F.get_size_image, make_image_tensor),
- (F._get_size_image_pil, make_image_pil),
+ (F._meta._get_size_image_pil, make_image_pil),
(F.get_size_image, make_image),
(F.get_size_bounding_boxes, make_bounding_boxes),
(F.get_size_mask, make_detection_masks),
@@ -2451,7 +2451,7 @@ class TestGetKernel:
# would also be fine
KERNELS = {
torch.Tensor: F.resize_image,
- PIL.Image.Image: F._resize_image_pil,
+ PIL.Image.Image: F._geometry._resize_image_pil,
tv_tensors.Image: F.resize_image,
tv_tensors.BoundingBoxes: F.resize_bounding_boxes,
tv_tensors.Mask: F.resize_mask,
@@ -2568,7 +2568,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.permute_channels_image, torch.Tensor),
- (F._permute_channels_image_pil, PIL.Image.Image),
+ (F._color._permute_channels_image_pil, PIL.Image.Image),
(F.permute_channels_image, tv_tensors.Image),
(F.permute_channels_video, tv_tensors.Video),
],
@@ -2655,7 +2655,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.elastic_image, torch.Tensor),
- (F._elastic_image_pil, PIL.Image.Image),
+ (F._geometry._elastic_image_pil, PIL.Image.Image),
(F.elastic_image, tv_tensors.Image),
(F.elastic_bounding_boxes, tv_tensors.BoundingBoxes),
(F.elastic_mask, tv_tensors.Mask),
@@ -2772,7 +2772,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.crop_image, torch.Tensor),
- (F._crop_image_pil, PIL.Image.Image),
+ (F._geometry._crop_image_pil, PIL.Image.Image),
(F.crop_image, tv_tensors.Image),
(F.crop_bounding_boxes, tv_tensors.BoundingBoxes),
(F.crop_mask, tv_tensors.Mask),
@@ -2994,7 +2994,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.erase_image, torch.Tensor),
- (F._erase_image_pil, PIL.Image.Image),
+ (F._augment._erase_image_pil, PIL.Image.Image),
(F.erase_image, tv_tensors.Image),
(F.erase_video, tv_tensors.Video),
],
@@ -3133,7 +3133,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.gaussian_blur_image, torch.Tensor),
- (F._gaussian_blur_image_pil, PIL.Image.Image),
+ (F._misc._gaussian_blur_image_pil, PIL.Image.Image),
(F.gaussian_blur_image, tv_tensors.Image),
(F.gaussian_blur_video, tv_tensors.Video),
],
@@ -3515,7 +3515,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.resized_crop_image, torch.Tensor),
- (F._resized_crop_image_pil, PIL.Image.Image),
+ (F._geometry._resized_crop_image_pil, PIL.Image.Image),
(F.resized_crop_image, tv_tensors.Image),
(F.resized_crop_bounding_boxes, tv_tensors.BoundingBoxes),
(F.resized_crop_mask, tv_tensors.Mask),
@@ -3700,7 +3700,7 @@ def test_functional(self, make_input):
# Since the whole fill story is already really inconsistent, we won't introduce yet another case to allow
# for this test to pass.
# See https://github.com/pytorch/vision/issues/6623 for a discussion.
- # (F._pad_image_pil, PIL.Image.Image),
+ # (F._geometry._pad_image_pil, PIL.Image.Image),
(F.pad_image, tv_tensors.Image),
(F.pad_bounding_boxes, tv_tensors.BoundingBoxes),
(F.pad_mask, tv_tensors.Mask),
@@ -3828,7 +3828,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.center_crop_image, torch.Tensor),
- (F._center_crop_image_pil, PIL.Image.Image),
+ (F._geometry._center_crop_image_pil, PIL.Image.Image),
(F.center_crop_image, tv_tensors.Image),
(F.center_crop_bounding_boxes, tv_tensors.BoundingBoxes),
(F.center_crop_mask, tv_tensors.Mask),
@@ -3994,7 +3994,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.perspective_image, torch.Tensor),
- (F._perspective_image_pil, PIL.Image.Image),
+ (F._geometry._perspective_image_pil, PIL.Image.Image),
(F.perspective_image, tv_tensors.Image),
(F.perspective_bounding_boxes, tv_tensors.BoundingBoxes),
(F.perspective_mask, tv_tensors.Mask),
@@ -4151,7 +4151,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.equalize_image, torch.Tensor),
- (F._equalize_image_pil, PIL.Image.Image),
+ (F._color._equalize_image_pil, PIL.Image.Image),
(F.equalize_image, tv_tensors.Image),
(F.equalize_video, tv_tensors.Video),
],
@@ -4381,7 +4381,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.invert_image, torch.Tensor),
- (F._invert_image_pil, PIL.Image.Image),
+ (F._color._invert_image_pil, PIL.Image.Image),
(F.invert_image, tv_tensors.Image),
(F.invert_video, tv_tensors.Video),
],
@@ -4420,7 +4420,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.posterize_image, torch.Tensor),
- (F._posterize_image_pil, PIL.Image.Image),
+ (F._color._posterize_image_pil, PIL.Image.Image),
(F.posterize_image, tv_tensors.Image),
(F.posterize_video, tv_tensors.Video),
],
@@ -4467,7 +4467,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.solarize_image, torch.Tensor),
- (F._solarize_image_pil, PIL.Image.Image),
+ (F._color._solarize_image_pil, PIL.Image.Image),
(F.solarize_image, tv_tensors.Image),
(F.solarize_video, tv_tensors.Video),
],
@@ -4514,7 +4514,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.autocontrast_image, torch.Tensor),
- (F._autocontrast_image_pil, PIL.Image.Image),
+ (F._color._autocontrast_image_pil, PIL.Image.Image),
(F.autocontrast_image, tv_tensors.Image),
(F.autocontrast_video, tv_tensors.Video),
],
@@ -4553,7 +4553,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_sharpness_image, torch.Tensor),
- (F._adjust_sharpness_image_pil, PIL.Image.Image),
+ (F._color._adjust_sharpness_image_pil, PIL.Image.Image),
(F.adjust_sharpness_image, tv_tensors.Image),
(F.adjust_sharpness_video, tv_tensors.Video),
],
@@ -4602,7 +4602,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_contrast_image, torch.Tensor),
- (F._adjust_contrast_image_pil, PIL.Image.Image),
+ (F._color._adjust_contrast_image_pil, PIL.Image.Image),
(F.adjust_contrast_image, tv_tensors.Image),
(F.adjust_contrast_video, tv_tensors.Video),
],
@@ -4644,7 +4644,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_gamma_image, torch.Tensor),
- (F._adjust_gamma_image_pil, PIL.Image.Image),
+ (F._color._adjust_gamma_image_pil, PIL.Image.Image),
(F.adjust_gamma_image, tv_tensors.Image),
(F.adjust_gamma_video, tv_tensors.Video),
],
@@ -4684,7 +4684,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_hue_image, torch.Tensor),
- (F._adjust_hue_image_pil, PIL.Image.Image),
+ (F._color._adjust_hue_image_pil, PIL.Image.Image),
(F.adjust_hue_image, tv_tensors.Image),
(F.adjust_hue_video, tv_tensors.Video),
],
@@ -4728,7 +4728,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.adjust_saturation_image, torch.Tensor),
- (F._adjust_saturation_image_pil, PIL.Image.Image),
+ (F._color._adjust_saturation_image_pil, PIL.Image.Image),
(F.adjust_saturation_image, tv_tensors.Image),
(F.adjust_saturation_video, tv_tensors.Video),
],
@@ -4799,11 +4799,11 @@ def test_functional(self, make_input, functional):
("functional", "kernel", "input_type"),
[
(F.five_crop, F.five_crop_image, torch.Tensor),
- (F.five_crop, F._five_crop_image_pil, PIL.Image.Image),
+ (F.five_crop, F._geometry._five_crop_image_pil, PIL.Image.Image),
(F.five_crop, F.five_crop_image, tv_tensors.Image),
(F.five_crop, F.five_crop_video, tv_tensors.Video),
(F.ten_crop, F.ten_crop_image, torch.Tensor),
- (F.ten_crop, F._ten_crop_image_pil, PIL.Image.Image),
+ (F.ten_crop, F._geometry._ten_crop_image_pil, PIL.Image.Image),
(F.ten_crop, F.ten_crop_image, tv_tensors.Image),
(F.ten_crop, F.ten_crop_video, tv_tensors.Video),
],
@@ -4955,7 +4955,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.rgb_to_grayscale_image, torch.Tensor),
- (F._rgb_to_grayscale_image_pil, PIL.Image.Image),
+ (F._color._rgb_to_grayscale_image_pil, PIL.Image.Image),
(F.rgb_to_grayscale_image, tv_tensors.Image),
],
)
@@ -5019,7 +5019,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.rgb_to_grayscale_image, torch.Tensor),
- (F._rgb_to_grayscale_image_pil, PIL.Image.Image),
+ (F._color._rgb_to_grayscale_image_pil, PIL.Image.Image),
(F.rgb_to_grayscale_image, tv_tensors.Image),
],
)
@@ -5951,7 +5951,7 @@ def test_functional(self, make_input):
("kernel", "input_type"),
[
(F.jpeg_image, torch.Tensor),
- (F._jpeg_image_pil, PIL.Image.Image),
+ (F._augment._jpeg_image_pil, PIL.Image.Image),
(F.jpeg_image, tv_tensors.Image),
(F.jpeg_video, tv_tensors.Video),
],
32 changes: 1 addition & 31 deletions torchvision/transforms/v2/functional/__init__.py
@@ -6,39 +6,23 @@
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
- _get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
- _get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
- _get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip

- from ._augment import _erase_image_pil, _jpeg_image_pil, erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
+ from ._augment import erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
from ._color import (
- _adjust_brightness_image_pil,
- _adjust_contrast_image_pil,
- _adjust_gamma_image_pil,
- _adjust_hue_image_pil,
- _adjust_saturation_image_pil,
- _adjust_sharpness_image_pil,
- _autocontrast_image_pil,
- _equalize_image_pil,
- _invert_image_pil,
- _permute_channels_image_pil,
- _posterize_image_pil,
- _rgb_to_grayscale_image_pil,
- _solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
@@ -82,19 +66,6 @@
to_grayscale,
)
from ._geometry import (
- _affine_image_pil,
- _center_crop_image_pil,
- _crop_image_pil,
- _elastic_image_pil,
- _five_crop_image_pil,
- _horizontal_flip_image_pil,
- _pad_image_pil,
- _perspective_image_pil,
- _resize_image_pil,
- _resized_crop_image_pil,
- _rotate_image_pil,
- _ten_crop_image_pil,
- _vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
@@ -161,7 +132,6 @@
vflip,
)
from ._misc import (
- _gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
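
For reference, the private PIL kernels touched by this diff now live only in the submodules that define them; the test changes above mirror that layout. A small, purely illustrative sketch (not part of the commit) of where each family ends up:

```python
# Illustrative mapping from operation to its private PIL kernel, grouped by the
# defining submodule, as referenced in the updated test_transforms_v2.py.
from torchvision.transforms.v2.functional import _augment, _color, _geometry, _meta, _misc

pil_kernels = {
    "resize": _geometry._resize_image_pil,
    "horizontal_flip": _geometry._horizontal_flip_image_pil,
    "adjust_brightness": _color._adjust_brightness_image_pil,
    "get_size": _meta._get_size_image_pil,
    "erase": _augment._erase_image_pil,
    "gaussian_blur": _misc._gaussian_blur_image_pil,
}

for name, kernel in pil_kernels.items():
    # Each entry is a plain function that operates on PIL.Image.Image inputs.
    print(f"{name:18s} -> {kernel.__module__}.{kernel.__name__}")
```
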
