286 changes: 152 additions & 134 deletions test/test_transforms_tensor.py
@@ -7,6 +7,7 @@
import numpy as np

import unittest
import pytest
from typing import Sequence

from common_utils import (
@@ -17,6 +18,7 @@
_create_data_batch,
_assert_equal_tensor_to_pil,
_assert_approx_equal_tensor_to_pil,
cpu_and_gpu
)
from _assert_utils import assert_equal

@@ -140,140 +142,6 @@ def test_random_autocontrast(self):
def test_random_equalize(self):
_test_op(F.equalize, T.RandomEqualize, device=self.device)

def test_color_jitter(self):

tol = 1.0 + 1e-10
for f in [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]]:
meth_kwargs = {"brightness": f}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=self.device,
tol=tol, agg_method="max"
)

for f in [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]]:
meth_kwargs = {"contrast": f}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=self.device,
tol=tol, agg_method="max"
)

for f in [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]]:
meth_kwargs = {"saturation": f}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=self.device,
tol=tol, agg_method="max"
)

for f in [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]]:
meth_kwargs = {"hue": f}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=self.device,
tol=16.1, agg_method="max"
)

# All 4 parameters together
meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 0.2, "hue": 0.2}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=self.device,
tol=12.1, agg_method="max"
)

def test_pad(self):
for m in ["constant", "edge", "reflect", "symmetric"]:
fill = 127 if m == "constant" else 0
for mul in [1, -1]:
# Test functional.pad (PIL and Tensor) with padding as single int
_test_functional_op(
F.pad, fn_kwargs={"padding": mul * 2, "fill": fill, "padding_mode": m},
device=self.device
)
# Test functional.pad and transforms.Pad with padding as [int, ]
fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=self.device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as list
fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=self.device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as tuple
fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=self.device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)

def test_crop(self):
fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5}
# Test transforms.RandomCrop with size and padding as tuple
meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, }
_test_op(
F.crop, T.RandomCrop, device=self.device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)

# Test transforms.functional.crop including outside the image area
fn_kwargs = {"top": -2, "left": 3, "height": 4, "width": 5} # top
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=self.device)

fn_kwargs = {"top": 1, "left": -3, "height": 4, "width": 5} # left
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=self.device)

fn_kwargs = {"top": 7, "left": 3, "height": 4, "width": 5} # bottom
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=self.device)

fn_kwargs = {"top": 3, "left": 8, "height": 4, "width": 5} # right
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=self.device)

fn_kwargs = {"top": -3, "left": -3, "height": 15, "width": 15} # all
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=self.device)

sizes = [5, [5, ], [6, 6]]
padding_configs = [
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"},
]

for size in sizes:
for padding_config in padding_configs:
config = dict(padding_config)
config["size"] = size
_test_class_op(T.RandomCrop, self.device, config)

def test_center_crop(self):
fn_kwargs = {"output_size": (4, 5)}
meth_kwargs = {"size": (4, 5), }
_test_op(
F.center_crop, T.CenterCrop, device=self.device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
fn_kwargs = {"output_size": (5,)}
meth_kwargs = {"size": (5, )}
_test_op(
F.center_crop, T.CenterCrop, device=self.device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=self.device)
# Test torchscript of transforms.CenterCrop with size as int
f = T.CenterCrop(size=5)
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

# Test torchscript of transforms.CenterCrop with size as [int, ]
f = T.CenterCrop(size=[5, ])
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

# Test torchscript of transforms.CenterCrop with size as tuple
f = T.CenterCrop(size=(6, 6))
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_center_crop.pt"))

def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kwargs=None):
if fn_kwargs is None:
fn_kwargs = {}
@@ -710,6 +578,156 @@ def test_autoaugment(self):
s_transform.save(os.path.join(tmp_dir, "t_autoaugment.pt"))


@pytest.mark.parametrize('device', cpu_and_gpu())
class TestColorJitter:
@AnirudhDagar (Contributor, Author) commented on Jun 8, 2021:

Thought it would be better to have a separate class for all these tests. It also avoids repeating the @pytest.mark.parametrize('device', cpu_and_gpu()) line for each test.
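
As an aside (not part of this diff), a minimal pytest sketch of the class-level parametrization pattern the comment describes; the local cpu_and_gpu helper and the TestExample names below are hypothetical stand-ins for the helpers imported from common_utils:

import pytest
import torch

def cpu_and_gpu():
    # Hypothetical stand-in for the common_utils helper: list available devices.
    return ('cpu', 'cuda') if torch.cuda.is_available() else ('cpu',)

@pytest.mark.parametrize('device', cpu_and_gpu())
class TestExample:
    # The class-level mark applies to every test method, so each method only
    # needs to accept `device` instead of repeating the decorator.
    def test_one(self, device):
        assert device in ('cpu', 'cuda')

    def test_two(self, device):
        assert isinstance(device, str)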


@pytest.mark.parametrize('brightness', [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]])
def test_color_jitter_brightness(self, brightness, device):
tol = 1.0 + 1e-10
meth_kwargs = {"brightness": brightness}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)

@pytest.mark.parametrize('contrast', [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]])
def test_color_jitter_contrast(self, contrast, device):
tol = 1.0 + 1e-10
meth_kwargs = {"contrast": contrast}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)

@pytest.mark.parametrize('saturation', [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]])
def test_color_jitter_saturation(self, saturation, device):
tol = 1.0 + 1e-10
meth_kwargs = {"saturation": saturation}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)

@pytest.mark.parametrize('hue', [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]])
def test_color_jitter_hue(self, hue, device):
meth_kwargs = {"hue": hue}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=16.1, agg_method="max"
)

def test_color_jitter_all(self, device):
# All 4 parameters together
meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 0.2, "hue": 0.2}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=12.1, agg_method="max"
)


@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('m', ["constant", "edge", "reflect", "symmetric"])
@pytest.mark.parametrize('mul', [1, -1])
def test_pad(m, mul, device):
fill = 127 if m == "constant" else 0

# Test functional.pad (PIL and Tensor) with padding as single int
_test_functional_op(
F.pad, fn_kwargs={"padding": mul * 2, "fill": fill, "padding_mode": m},
device=device
)
# Test functional.pad and transforms.Pad with padding as [int, ]
fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as list
fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as tuple
fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)


@pytest.mark.parametrize('device', cpu_and_gpu())
def test_crop(device):
fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5}
# Test transforms.RandomCrop with size and padding as tuple
meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, }
_test_op(
F.crop, T.RandomCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)

# Test transforms.functional.crop including outside the image area
fn_kwargs = {"top": -2, "left": 3, "height": 4, "width": 5} # top
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)

fn_kwargs = {"top": 1, "left": -3, "height": 4, "width": 5} # left
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)

fn_kwargs = {"top": 7, "left": 3, "height": 4, "width": 5} # bottom
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)

fn_kwargs = {"top": 3, "left": 8, "height": 4, "width": 5} # right
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)

fn_kwargs = {"top": -3, "left": -3, "height": 15, "width": 15} # all
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)


@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('padding_config', [
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"}
])
@pytest.mark.parametrize('size', [5, [5, ], [6, 6]])
A reviewer (Member) commented on lines +683 to +690:

This parametrization and the config["size"] = size below are a bit unnatural. I think it mostly comes from the fact that we want to avoid passing a default for fill, but its default is 0 and we already hardcode it here. Maybe parametrizing over 'padding_mode, fill' and 'size' would make more sense, instead of parametrizing over 'padding_config'.

But it's not super important, so let's leave it as is!

def test_crop_pad(size, padding_config, device):
config = dict(padding_config)
config["size"] = size
_test_class_op(T.RandomCrop, device, config)
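
A rough sketch of the alternative parametrization the reviewer suggests above (not part of this diff): parametrize over (padding_mode, fill) pairs and size directly, so no dict mutation is needed in the test body. It reuses this file's _test_class_op, T, and cpu_and_gpu helpers, assumes fill can simply be passed as 0 for the non-constant modes since that is already its default, and the name test_crop_pad_alt is hypothetical.

@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('padding_mode, fill', [
    ('constant', 0),
    ('constant', 10),
    ('constant', 20),
    ('edge', 0),
    ('reflect', 0),
])
@pytest.mark.parametrize('size', [5, [5, ], [6, 6]])
def test_crop_pad_alt(size, padding_mode, fill, device):
    # Build the RandomCrop kwargs directly from the parametrized arguments.
    config = {"size": size, "padding_mode": padding_mode, "fill": fill}
    _test_class_op(T.RandomCrop, device, config)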


@pytest.mark.parametrize('device', cpu_and_gpu())
def test_center_crop(device):
fn_kwargs = {"output_size": (4, 5)}
meth_kwargs = {"size": (4, 5), }
_test_op(
F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
fn_kwargs = {"output_size": (5,)}
meth_kwargs = {"size": (5, )}
_test_op(
F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=device)
# Test torchscript of transforms.CenterCrop with size as int
f = T.CenterCrop(size=5)
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

# Test torchscript of transforms.CenterCrop with size as [int, ]
f = T.CenterCrop(size=[5, ])
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

# Test torchscript of transforms.CenterCrop with size as tuple
f = T.CenterCrop(size=(6, 6))
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)

with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, "t_center_crop.pt"))


@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device")
class CUDATester(Tester):
