From 48c98fe983e1ac8a78d449fab414ce2d3a0d2e43 Mon Sep 17 00:00:00 2001 From: Heungwoo Lee Date: Fri, 4 Feb 2022 17:06:43 +0000 Subject: [PATCH 1/6] Refactor BoxOps tests to use parameterize --- test/test_ops.py | 208 +++++++++++++++++++++++++---------------------- 1 file changed, 113 insertions(+), 95 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index d687e2e2952..d3c4930b1f1 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1107,114 +1107,132 @@ def test_bbox_convert_jit(self): torch.testing.assert_close(scripted_cxcywh, box_cxcywh, rtol=0.0, atol=TOLERANCE) -class TestBoxArea: - def test_box_area(self): - def area_check(box, expected, tolerance=1e-4): - out = ops.box_area(box) - torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) - - # Check for int boxes - for dtype in [torch.int8, torch.int16, torch.int32, torch.int64]: - box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0]], dtype=dtype) - expected = torch.tensor([10000, 0]) - area_check(box_tensor, expected) - - # Check for float32 and float64 boxes - for dtype in [torch.float32, torch.float64]: - box_tensor = torch.tensor( - [ - [285.3538, 185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ], - dtype=dtype, - ) - expected = torch.tensor([604723.0806, 600965.4666, 592761.0085], dtype=torch.float64) - area_check(box_tensor, expected, tolerance=0.05) - - # Check for float16 box - box_tensor = torch.tensor( - [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]], - dtype=torch.float16, - ) - expected = torch.tensor([605113.875, 600495.1875, 592247.25]) - area_check(box_tensor, expected) +class BoxTestBase(ABC): + @property + @abstractmethod + def _target_fn(self) -> Tuple[bool, callable]: + pass - def test_box_area_jit(self): - box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0]], dtype=torch.float) - TOLERANCE = 1e-3 - expected = ops.box_area(box_tensor) - scripted_fn = torch.jit.script(ops.box_area) - scripted_area = scripted_fn(box_tensor) - torch.testing.assert_close(scripted_area, expected, rtol=0.0, atol=TOLERANCE) + def _perform_box_operation(self, box: Tensor): + is_binary_fn = self._target_fn()[0] + target_fn = self._target_fn()[1] + scripted_fn = torch.jit.script(target_fn) + return scripted_fn(box, box) if is_binary_fn else scripted_fn(box) + def _script_box_operation(self, box: Tensor): + is_binary_fn = self._target_fn()[0] + target_fn = self._target_fn()[1] + return target_fn(box, box) if is_binary_fn else target_fn(box) -class TestBoxIou: - def test_iou(self): - def iou_check(box, expected, tolerance=1e-4): - out = ops.box_iou(box, box) + def _run_test(self, test_input, dtype, tolerance, expected): + def assert_close(box: Tensor, expected: Tensor, tolerance): + out = self._perform_box_operation(box) torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) - # Check for int boxes - for dtype in [torch.int16, torch.int32, torch.int64]: - box = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=dtype) - expected = torch.tensor([[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]]) - iou_check(box, expected) + actual_box = torch.tensor(test_input, dtype=dtype) + expected_box = torch.tensor(expected) + assert_close(actual_box, expected_box, tolerance) - # Check for float boxes - for dtype in [torch.float16, torch.float32, torch.float64]: - box_tensor = torch.tensor( - [ - [285.3538, 
185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ], - dtype=dtype, - ) - expected = torch.tensor([[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]]) - iou_check(box_tensor, expected, tolerance=0.002 if dtype == torch.float16 else 1e-4) - - def test_iou_jit(self): - box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float) + def _run_jit_test(self, test_input): + box_tensor = torch.tensor(test_input, dtype=torch.float) TOLERANCE = 1e-3 - expected = ops.box_iou(box_tensor, box_tensor) - scripted_fn = torch.jit.script(ops.box_iou) - scripted_iou = scripted_fn(box_tensor, box_tensor) - torch.testing.assert_close(scripted_iou, expected, rtol=0.0, atol=TOLERANCE) + expected = self._perform_box_operation(box_tensor) + scripted_area = self._perform_box_operation(box_tensor) + torch.testing.assert_close(scripted_area, expected, rtol=0.0, atol=TOLERANCE) -class TestGenBoxIou: - def test_gen_iou(self): - def gen_iou_check(box, expected, tolerance=1e-4): - out = ops.generalized_box_iou(box, box) - torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) +class TestBoxArea(BoxTestBase): + __DEFAULT_TOLERANCE = 1e-4 + __TOLERANCE_0_DOT_05 = 0.05 + __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 0, 0]] + __INT_EXPECTED_OUTPUT = [10000, 0] + __FLOAT_32_N_64_TEST_INPUT = [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ] + __FLOAT_32_N_64_EXPECTED_OUTPUT = [604723.0806, 600965.4666, 592761.0085] + __FLOAT_16_TEST_INPUT = [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]] + __FLOAT_16_EXPECTED_OUTPUT = [605113.875, 600495.1875, 592247.25] + + def _target_fn(self) -> Tuple[bool, callable]: + return (False, ops.box_area) + + @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ + pytest.param(__INT_TEST_INPUT, torch.int8, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__FLOAT_32_N_64_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT), + pytest.param(__FLOAT_32_N_64_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT), + pytest.param(__FLOAT_16_TEST_INPUT, torch.float16, __DEFAULT_TOLERANCE, __FLOAT_16_EXPECTED_OUTPUT) + ]) + def test_box_area(self, test_input, dtype, tolerance, expected): + self._run_test(test_input, dtype, tolerance, expected) - # Check for int boxes - for dtype in [torch.int16, torch.int32, torch.int64]: - box = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=dtype) - expected = torch.tensor([[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]]) - gen_iou_check(box, expected) + def test_box_area_jit(self): + self._run_jit_test([[0, 0, 100, 100], [0, 0, 0, 0]]) + + +class TestBoxIou(BoxTestBase): + __DEFAULT_TOLERANCE = 1e-4 + __TOLERANCE_0_DOT_002 = 0.002 + __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] + __INT_EXPECTED_OUTPUT = [[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]] + __FLOAT_TEST_INPUT = [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 
188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ] + __FLOAT_EXPECTED = [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] + + def _target_fn(self) -> Tuple[bool, callable]: + return (True, ops.box_iou) + + @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float32, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float64, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), + ]) + def test_iou(self, test_input, dtype, tolerance, expected): + self._run_test(test_input, dtype, tolerance, expected) - # Check for float boxes - for dtype in [torch.float16, torch.float32, torch.float64]: - box_tensor = torch.tensor( - [ - [285.3538, 185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ], - dtype=dtype, - ) - expected = torch.tensor([[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]]) - gen_iou_check(box_tensor, expected, tolerance=0.002 if dtype == torch.float16 else 1e-3) + def test_iou_jit(self): + self._run_jit_test([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]]) + + +class TestGenBoxIou(BoxTestBase): + __DEFAULT_TOLERANCE = 1e-4 + __TOLERANCE_0_DOT_001 = 1e-3 + __TOLERANCE_0_DOT_002 = 0.002 + __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] + __INT_EXPECTED_OUTPUT = [[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]] + __FLOAT_TEST_INPUT = [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ] + __FLOAT_EXPECTED = [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] + + def _target_fn(self) -> Tuple[bool, callable]: + return (True, ops.generalized_box_iou) + + @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), + ]) + def test_gen_iou(self, test_input, dtype, tolerance, expected): + self._run_test(test_input, dtype, tolerance, expected) def test_giou_jit(self): - box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=torch.float) - TOLERANCE = 1e-3 - expected = ops.generalized_box_iou(box_tensor, box_tensor) - scripted_fn = torch.jit.script(ops.generalized_box_iou) - scripted_iou = scripted_fn(box_tensor, box_tensor) - torch.testing.assert_close(scripted_iou, expected, rtol=0.0, atol=TOLERANCE) + self._run_jit_test([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]]) class TestMasksToBoxes: From 5f6dcb658aabcf65c4dec066659f796f924d0b9f Mon Sep 17 00:00:00 2001 From: 
Heungwoo Lee Date: Fri, 4 Feb 2022 17:06:43 +0000 Subject: [PATCH 2/6] Refactor BoxOps tests to use parameterize --- test/test_ops.py | 75 +++++++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index d3c4930b1f1..dcaa618b107 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -8,7 +8,7 @@ import pytest import torch import torch.fx -from common_utils import needs_cuda, cpu_and_gpu, assert_equal +from common_utils import assert_equal, cpu_and_gpu, needs_cuda from PIL import Image from torch import nn, Tensor from torch.autograd import gradcheck @@ -67,7 +67,7 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwar rois_dtype = self.dtype if rois_dtype is None else rois_dtype pool_size = 5 # n_channels % (pool_size ** 2) == 0 required for PS opeartions. - n_channels = 2 * (pool_size ** 2) + n_channels = 2 * (pool_size**2) x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device) if not contiguous: x = x.permute(0, 1, 3, 2) @@ -103,7 +103,7 @@ def test_is_leaf_node(self, device): def test_backward(self, seed, device, contiguous): torch.random.manual_seed(seed) pool_size = 2 - x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True) + x = torch.rand(1, 2 * (pool_size**2), 5, 5, dtype=self.dtype, device=device, requires_grad=True) if not contiguous: x = x.permute(0, 1, 3, 2) rois = torch.tensor( @@ -1152,21 +1152,32 @@ class TestBoxArea(BoxTestBase): [279.2440, 197.9812, 1189.4746, 849.2019], ] __FLOAT_32_N_64_EXPECTED_OUTPUT = [604723.0806, 600965.4666, 592761.0085] - __FLOAT_16_TEST_INPUT = [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]] + __FLOAT_16_TEST_INPUT = [ + [285.25, 185.625, 1194.0, 851.5], + [285.25, 188.75, 1192.0, 851.0], + [279.25, 198.0, 1189.0, 849.0], + ] __FLOAT_16_EXPECTED_OUTPUT = [605113.875, 600495.1875, 592247.25] def _target_fn(self) -> Tuple[bool, callable]: return (False, ops.box_area) - @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int8, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__FLOAT_32_N_64_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT), - pytest.param(__FLOAT_32_N_64_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT), - pytest.param(__FLOAT_16_TEST_INPUT, torch.float16, __DEFAULT_TOLERANCE, __FLOAT_16_EXPECTED_OUTPUT) - ]) + @pytest.mark.parametrize( + "test_input, dtype, tolerance, expected", + [ + pytest.param(__INT_TEST_INPUT, torch.int8, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param( + __FLOAT_32_N_64_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT + ), + pytest.param( + __FLOAT_32_N_64_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT + ), + pytest.param(__FLOAT_16_TEST_INPUT, 
torch.float16, __DEFAULT_TOLERANCE, __FLOAT_16_EXPECTED_OUTPUT), + ], + ) def test_box_area(self, test_input, dtype, tolerance, expected): self._run_test(test_input, dtype, tolerance, expected) @@ -1189,14 +1200,17 @@ class TestBoxIou(BoxTestBase): def _target_fn(self) -> Tuple[bool, callable]: return (True, ops.box_iou) - @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float32, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float64, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), - ]) + @pytest.mark.parametrize( + "test_input, dtype, tolerance, expected", + [ + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float32, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float64, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), + ], + ) def test_iou(self, test_input, dtype, tolerance, expected): self._run_test(test_input, dtype, tolerance, expected) @@ -1220,14 +1234,17 @@ class TestGenBoxIou(BoxTestBase): def _target_fn(self) -> Tuple[bool, callable]: return (True, ops.generalized_box_iou) - @pytest.mark.parametrize("test_input, dtype, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), - ]) + @pytest.mark.parametrize( + "test_input, dtype, tolerance, expected", + [ + pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), + pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), + pytest.param(__FLOAT_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), + ], + ) def test_gen_iou(self, test_input, dtype, tolerance, expected): self._run_test(test_input, dtype, tolerance, expected) From 126a6120e8b06c64ef14e4fcbdf948acd8438278 Mon Sep 17 00:00:00 2001 From: Heungwoo Lee Date: Mon, 7 Feb 2022 15:42:03 +0000 Subject: [PATCH 3/6] Refactor BoxOps to use parameterize, addressed comments from PR#5380 --- test/test_ops.py | 183 ++++++++++++++++++++++++----------------------- 1 file changed, 92 insertions(+), 91 deletions(-) 
diff --git a/test/test_ops.py b/test/test_ops.py index dcaa618b107..2baaf1d2989 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1113,142 +1113,143 @@ class BoxTestBase(ABC): def _target_fn(self) -> Tuple[bool, callable]: pass - def _perform_box_operation(self, box: Tensor): + def __perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> Tensor: is_binary_fn = self._target_fn()[0] target_fn = self._target_fn()[1] - scripted_fn = torch.jit.script(target_fn) - return scripted_fn(box, box) if is_binary_fn else scripted_fn(box) + box_operation = torch.jit.script(target_fn) if run_as_script else target_fn + return box_operation(box, box) if is_binary_fn else box_operation(box) - def _script_box_operation(self, box: Tensor): - is_binary_fn = self._target_fn()[0] - target_fn = self._target_fn()[1] - return target_fn(box, box) if is_binary_fn else target_fn(box) - - def _run_test(self, test_input, dtype, tolerance, expected): + def _run_test(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: def assert_close(box: Tensor, expected: Tensor, tolerance): - out = self._perform_box_operation(box) + out = self.__perform_box_operation(box) torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) - actual_box = torch.tensor(test_input, dtype=dtype) - expected_box = torch.tensor(expected) - assert_close(actual_box, expected_box, tolerance) + for dtype in dtypes: + actual_box = torch.tensor(test_input, dtype=dtype) + expected_box = torch.tensor(expected) + assert_close(actual_box, expected_box, tolerance) - def _run_jit_test(self, test_input): + def _run_jit_test(self, test_input: list) -> None: box_tensor = torch.tensor(test_input, dtype=torch.float) - TOLERANCE = 1e-3 - expected = self._perform_box_operation(box_tensor) - scripted_area = self._perform_box_operation(box_tensor) - torch.testing.assert_close(scripted_area, expected, rtol=0.0, atol=TOLERANCE) + expected = self.__perform_box_operation(box_tensor, True) + scripted_area = self.__perform_box_operation(box_tensor, True) + torch.testing.assert_close(scripted_area, expected, rtol=0.0, atol=1e-3) class TestBoxArea(BoxTestBase): - __DEFAULT_TOLERANCE = 1e-4 - __TOLERANCE_0_DOT_05 = 0.05 - __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 0, 0]] - __INT_EXPECTED_OUTPUT = [10000, 0] - __FLOAT_32_N_64_TEST_INPUT = [ - [285.3538, 185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ] - __FLOAT_32_N_64_EXPECTED_OUTPUT = [604723.0806, 600965.4666, 592761.0085] - __FLOAT_16_TEST_INPUT = [ - [285.25, 185.625, 1194.0, 851.5], - [285.25, 188.75, 1192.0, 851.0], - [279.25, 198.0, 1189.0, 849.0], - ] - __FLOAT_16_EXPECTED_OUTPUT = [605113.875, 600495.1875, 592247.25] - def _target_fn(self) -> Tuple[bool, callable]: return (False, ops.box_area) + def __generate_int_input() -> list[int]: + return [[0, 0, 100, 100], [0, 0, 0, 0]] + + def __generate_int_expected() -> list[int]: + return [10000, 0] + + def __generate_float_input(index: int) -> list[float]: + return [ + [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ], + [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]], + ][index] + + def __generate_float_expected(index: int) -> list[float]: + return [[604723.0806, 600965.4666, 592761.0085], [605113.875, 600495.1875, 592247.25]][index] + @pytest.mark.parametrize( - 
"test_input, dtype, tolerance, expected", + "test_input, dtypes, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int8, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param( - __FLOAT_32_N_64_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT - ), pytest.param( - __FLOAT_32_N_64_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_05, __FLOAT_32_N_64_EXPECTED_OUTPUT + __generate_int_input(), + [torch.int8, torch.int16, torch.int32, torch.int64], + 1e-4, + __generate_int_expected(), ), - pytest.param(__FLOAT_16_TEST_INPUT, torch.float16, __DEFAULT_TOLERANCE, __FLOAT_16_EXPECTED_OUTPUT), + pytest.param(__generate_float_input(0), [torch.float32, torch.float64], 0.05, __generate_float_expected(0)), + pytest.param(__generate_float_input(1), [torch.float16], 1e-4, __generate_float_expected(1)), ], ) - def test_box_area(self, test_input, dtype, tolerance, expected): - self._run_test(test_input, dtype, tolerance, expected) + def test_box_area(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + self._run_test(test_input, dtypes, tolerance, expected) - def test_box_area_jit(self): + def test_box_area_jit(self) -> None: self._run_jit_test([[0, 0, 100, 100], [0, 0, 0, 0]]) class TestBoxIou(BoxTestBase): - __DEFAULT_TOLERANCE = 1e-4 - __TOLERANCE_0_DOT_002 = 0.002 - __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - __INT_EXPECTED_OUTPUT = [[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]] - __FLOAT_TEST_INPUT = [ - [285.3538, 185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ] - __FLOAT_EXPECTED = [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] - def _target_fn(self) -> Tuple[bool, callable]: return (True, ops.box_iou) + def __generate_int_input() -> list[int]: + return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] + + def __generate_int_expected() -> list[float]: + return [[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]] + + def __generate_float_input() -> list[float]: + return [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ] + + def __generate_float_expected() -> list[float]: + return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] + @pytest.mark.parametrize( - "test_input, dtype, tolerance, expected", + "test_input, dtypes, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float32, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float64, __DEFAULT_TOLERANCE, __FLOAT_EXPECTED), + pytest.param( + __generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, __generate_int_expected() + ), + pytest.param(__generate_float_input(), [torch.float16], 0.002, __generate_float_expected()), + 
pytest.param(__generate_float_input(), [torch.float32, torch.float64], 1e-4, __generate_float_expected()), ], ) - def test_iou(self, test_input, dtype, tolerance, expected): - self._run_test(test_input, dtype, tolerance, expected) + def test_iou(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + self._run_test(test_input, dtypes, tolerance, expected) - def test_iou_jit(self): + def test_iou_jit(self) -> None: self._run_jit_test([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]]) class TestGenBoxIou(BoxTestBase): - __DEFAULT_TOLERANCE = 1e-4 - __TOLERANCE_0_DOT_001 = 1e-3 - __TOLERANCE_0_DOT_002 = 0.002 - __INT_TEST_INPUT = [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - __INT_EXPECTED_OUTPUT = [[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]] - __FLOAT_TEST_INPUT = [ - [285.3538, 185.5758, 1193.5110, 851.4551], - [285.1472, 188.7374, 1192.4984, 851.0669], - [279.2440, 197.9812, 1189.4746, 849.2019], - ] - __FLOAT_EXPECTED = [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] - def _target_fn(self) -> Tuple[bool, callable]: return (True, ops.generalized_box_iou) + def __generate_int_input() -> list[int]: + return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] + + def __generate_int_expected() -> list[float]: + return [[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]] + + def __generate_float_input() -> list[float]: + return [ + [285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019], + ] + + def __generate_float_expected() -> list[float]: + return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] + @pytest.mark.parametrize( - "test_input, dtype, tolerance, expected", + "test_input, dtypes, tolerance, expected", [ - pytest.param(__INT_TEST_INPUT, torch.int16, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int32, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__INT_TEST_INPUT, torch.int64, __DEFAULT_TOLERANCE, __INT_EXPECTED_OUTPUT), - pytest.param(__FLOAT_TEST_INPUT, torch.float16, __TOLERANCE_0_DOT_002, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float32, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), - pytest.param(__FLOAT_TEST_INPUT, torch.float64, __TOLERANCE_0_DOT_001, __FLOAT_EXPECTED), + pytest.param( + __generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, __generate_int_expected() + ), + pytest.param(__generate_float_input(), [torch.float16], 0.002, __generate_float_expected()), + pytest.param(__generate_float_input(), [torch.float32, torch.float64], 0.001, __generate_float_expected()), ], ) - def test_gen_iou(self, test_input, dtype, tolerance, expected): - self._run_test(test_input, dtype, tolerance, expected) + def test_gen_iou(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + self._run_test(test_input, dtypes, tolerance, expected) - def test_giou_jit(self): + def test_giou_jit(self) -> None: self._run_jit_test([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]]) From 01568ab8a4b5c30248bbecd7b247a6f0e7b84407 Mon Sep 17 00:00:00 2001 From: Heungwoo Lee Date: Mon, 7 Feb 2022 15:49:41 +0000 Subject: [PATCH 4/6] Refactor BoxOps to use parameterize, addressed minor styling comments from PR#5380 --- test/test_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index 
2baaf1d2989..2905d75e207 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -67,7 +67,7 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwar
         rois_dtype = self.dtype if rois_dtype is None else rois_dtype
         pool_size = 5
         # n_channels % (pool_size ** 2) == 0 required for PS opeartions.
-        n_channels = 2 * (pool_size**2)
+        n_channels = 2 * (pool_size ** 2)
         x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)
@@ -103,7 +103,7 @@ def test_is_leaf_node(self, device):
     def test_backward(self, seed, device, contiguous):
         torch.random.manual_seed(seed)
         pool_size = 2
-        x = torch.rand(1, 2 * (pool_size**2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
+        x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)
         rois = torch.tensor(

From 45537e84bbc2b92a8760034326e3956b2428e2ec Mon Sep 17 00:00:00 2001
From: Heungwoo Lee
Date: Tue, 8 Feb 2022 10:35:54 +0000
Subject: [PATCH 5/6] Refactor BoxOps to use parameterize, addressed typing errors from PR#5380

---
 test/test_ops.py | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)

diff --git a/test/test_ops.py b/test/test_ops.py
index 2905d75e207..50a724e7d48 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -2,7 +2,7 @@ import os
 from abc import ABC, abstractmethod
 from functools import lru_cache
-from typing import Tuple
+from typing import Callable, List, Tuple
 import numpy as np
 import pytest
 import torch
 import torch.fx
@@ -1108,9 +1108,8 @@ def test_bbox_convert_jit(self):
 
 
 class BoxTestBase(ABC):
-    @property
     @abstractmethod
-    def _target_fn(self) -> Tuple[bool, callable]:
+    def _target_fn(self) -> Tuple[bool, Callable]:
         pass
 
     def __perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> Tensor:
@@ -1119,7 +1118,7 @@ def __perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> T
         box_operation = torch.jit.script(target_fn) if run_as_script else target_fn
         return box_operation(box, box) if is_binary_fn else box_operation(box)
 
-    def _run_test(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None:
+    def _run_test(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None:
         def assert_close(box: Tensor, expected: Tensor, tolerance):
             out = self.__perform_box_operation(box)
             torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance)
@@ -1129,7 +1128,7 @@ def assert_close(box: Tensor, expected: Tensor, tolerance):
             expected_box = torch.tensor(expected)
             assert_close(actual_box, expected_box, tolerance)
 
-    def _run_jit_test(self, test_input: list) -> None:
+    def _run_jit_test(self, test_input: List) -> None:
         box_tensor = torch.tensor(test_input, dtype=torch.float)
         expected = self.__perform_box_operation(box_tensor, True)
         scripted_area = self.__perform_box_operation(box_tensor, True)
@@ -1137,16 +1136,16 @@ def _run_jit_test(self, test_input: list) -> None:
 
 
 class TestBoxArea(BoxTestBase):
-    def _target_fn(self) -> Tuple[bool, callable]:
+    def _target_fn(self) -> Tuple[bool, Callable]:
         return (False, ops.box_area)
 
-    def __generate_int_input() -> list[int]:
+    def __generate_int_input() -> List[List[int]]:
         return [[0, 0, 100, 100], [0, 0, 0, 0]]
 
-    def __generate_int_expected() -> list[int]:
+    def __generate_int_expected() -> List[int]:
         return [10000, 0]
 
-    def __generate_float_input(index: int) -> list[float]:
+    def 
__generate_float_input(index: int) -> List[List[float]]: return [ [ [285.3538, 185.5758, 1193.5110, 851.4551], @@ -1156,7 +1155,7 @@ def __generate_float_input(index: int) -> list[float]: [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]], ][index] - def __generate_float_expected(index: int) -> list[float]: + def __generate_float_expected(index: int) -> List[float]: return [[604723.0806, 600965.4666, 592761.0085], [605113.875, 600495.1875, 592247.25]][index] @pytest.mark.parametrize( @@ -1172,7 +1171,7 @@ def __generate_float_expected(index: int) -> list[float]: pytest.param(__generate_float_input(1), [torch.float16], 1e-4, __generate_float_expected(1)), ], ) - def test_box_area(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + def test_box_area(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: self._run_test(test_input, dtypes, tolerance, expected) def test_box_area_jit(self) -> None: @@ -1180,23 +1179,23 @@ def test_box_area_jit(self) -> None: class TestBoxIou(BoxTestBase): - def _target_fn(self) -> Tuple[bool, callable]: + def _target_fn(self) -> Tuple[bool, Callable]: return (True, ops.box_iou) - def __generate_int_input() -> list[int]: + def __generate_int_input() -> List[List[int]]: return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - def __generate_int_expected() -> list[float]: + def __generate_int_expected() -> List[List[float]]: return [[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]] - def __generate_float_input() -> list[float]: + def __generate_float_input() -> List[List[float]]: return [ [285.3538, 185.5758, 1193.5110, 851.4551], [285.1472, 188.7374, 1192.4984, 851.0669], [279.2440, 197.9812, 1189.4746, 849.2019], ] - def __generate_float_expected() -> list[float]: + def __generate_float_expected() -> List[List[float]]: return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] @pytest.mark.parametrize( @@ -1209,7 +1208,7 @@ def __generate_float_expected() -> list[float]: pytest.param(__generate_float_input(), [torch.float32, torch.float64], 1e-4, __generate_float_expected()), ], ) - def test_iou(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + def test_iou(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: self._run_test(test_input, dtypes, tolerance, expected) def test_iou_jit(self) -> None: @@ -1217,23 +1216,23 @@ def test_iou_jit(self) -> None: class TestGenBoxIou(BoxTestBase): - def _target_fn(self) -> Tuple[bool, callable]: + def _target_fn(self) -> Tuple[bool, Callable]: return (True, ops.generalized_box_iou) - def __generate_int_input() -> list[int]: + def __generate_int_input() -> List[List[int]]: return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - def __generate_int_expected() -> list[float]: + def __generate_int_expected() -> List[List[float]]: return [[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]] - def __generate_float_input() -> list[float]: + def __generate_float_input() -> List[List[float]]: return [ [285.3538, 185.5758, 1193.5110, 851.4551], [285.1472, 188.7374, 1192.4984, 851.0669], [279.2440, 197.9812, 1189.4746, 849.2019], ] - def __generate_float_expected() -> list[float]: + def __generate_float_expected() -> List[List[float]]: return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] @pytest.mark.parametrize( @@ -1246,7 +1245,7 @@ def 
__generate_float_expected() -> list[float]: pytest.param(__generate_float_input(), [torch.float32, torch.float64], 0.001, __generate_float_expected()), ], ) - def test_gen_iou(self, test_input: list, dtypes: list[torch.dtype], tolerance: float, expected: list) -> None: + def test_gen_iou(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: self._run_test(test_input, dtypes, tolerance, expected) def test_giou_jit(self) -> None: From b25fcb4dc658acf6f20f1d6fbc746c4f3c09bc2b Mon Sep 17 00:00:00 2001 From: Heungwoo Lee Date: Tue, 8 Feb 2022 12:42:09 +0000 Subject: [PATCH 6/6] Refactor BoxOps to use parameterize, addressed minor naming comments for PR#5380 --- test/test_ops.py | 52 ++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index 50a724e7d48..4fa0beb281b 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1112,7 +1112,7 @@ class BoxTestBase(ABC): def _target_fn(self) -> Tuple[bool, Callable]: pass - def __perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> Tensor: + def _perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> Tensor: is_binary_fn = self._target_fn()[0] target_fn = self._target_fn()[1] box_operation = torch.jit.script(target_fn) if run_as_script else target_fn @@ -1120,7 +1120,7 @@ def __perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> T def _run_test(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: def assert_close(box: Tensor, expected: Tensor, tolerance): - out = self.__perform_box_operation(box) + out = self._perform_box_operation(box) torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) for dtype in dtypes: @@ -1130,8 +1130,8 @@ def assert_close(box: Tensor, expected: Tensor, tolerance): def _run_jit_test(self, test_input: List) -> None: box_tensor = torch.tensor(test_input, dtype=torch.float) - expected = self.__perform_box_operation(box_tensor, True) - scripted_area = self.__perform_box_operation(box_tensor, True) + expected = self._perform_box_operation(box_tensor, True) + scripted_area = self._perform_box_operation(box_tensor, True) torch.testing.assert_close(scripted_area, expected, rtol=0.0, atol=1e-3) @@ -1139,13 +1139,13 @@ class TestBoxArea(BoxTestBase): def _target_fn(self) -> Tuple[bool, Callable]: return (False, ops.box_area) - def __generate_int_input() -> List[List[int]]: + def _generate_int_input() -> List[List[int]]: return [[0, 0, 100, 100], [0, 0, 0, 0]] - def __generate_int_expected() -> List[int]: + def _generate_int_expected() -> List[int]: return [10000, 0] - def __generate_float_input(index: int) -> List[List[float]]: + def _generate_float_input(index: int) -> List[List[float]]: return [ [ [285.3538, 185.5758, 1193.5110, 851.4551], @@ -1155,20 +1155,20 @@ def __generate_float_input(index: int) -> List[List[float]]: [[285.25, 185.625, 1194.0, 851.5], [285.25, 188.75, 1192.0, 851.0], [279.25, 198.0, 1189.0, 849.0]], ][index] - def __generate_float_expected(index: int) -> List[float]: + def _generate_float_expected(index: int) -> List[float]: return [[604723.0806, 600965.4666, 592761.0085], [605113.875, 600495.1875, 592247.25]][index] @pytest.mark.parametrize( "test_input, dtypes, tolerance, expected", [ pytest.param( - __generate_int_input(), + _generate_int_input(), [torch.int8, torch.int16, torch.int32, torch.int64], 1e-4, - __generate_int_expected(), + 
_generate_int_expected(), ), - pytest.param(__generate_float_input(0), [torch.float32, torch.float64], 0.05, __generate_float_expected(0)), - pytest.param(__generate_float_input(1), [torch.float16], 1e-4, __generate_float_expected(1)), + pytest.param(_generate_float_input(0), [torch.float32, torch.float64], 0.05, _generate_float_expected(0)), + pytest.param(_generate_float_input(1), [torch.float16], 1e-4, _generate_float_expected(1)), ], ) def test_box_area(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: @@ -1182,30 +1182,30 @@ class TestBoxIou(BoxTestBase): def _target_fn(self) -> Tuple[bool, Callable]: return (True, ops.box_iou) - def __generate_int_input() -> List[List[int]]: + def _generate_int_input() -> List[List[int]]: return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - def __generate_int_expected() -> List[List[float]]: + def _generate_int_expected() -> List[List[float]]: return [[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]] - def __generate_float_input() -> List[List[float]]: + def _generate_float_input() -> List[List[float]]: return [ [285.3538, 185.5758, 1193.5110, 851.4551], [285.1472, 188.7374, 1192.4984, 851.0669], [279.2440, 197.9812, 1189.4746, 849.2019], ] - def __generate_float_expected() -> List[List[float]]: + def _generate_float_expected() -> List[List[float]]: return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] @pytest.mark.parametrize( "test_input, dtypes, tolerance, expected", [ pytest.param( - __generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, __generate_int_expected() + _generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, _generate_int_expected() ), - pytest.param(__generate_float_input(), [torch.float16], 0.002, __generate_float_expected()), - pytest.param(__generate_float_input(), [torch.float32, torch.float64], 1e-4, __generate_float_expected()), + pytest.param(_generate_float_input(), [torch.float16], 0.002, _generate_float_expected()), + pytest.param(_generate_float_input(), [torch.float32, torch.float64], 1e-4, _generate_float_expected()), ], ) def test_iou(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None: @@ -1219,30 +1219,30 @@ class TestGenBoxIou(BoxTestBase): def _target_fn(self) -> Tuple[bool, Callable]: return (True, ops.generalized_box_iou) - def __generate_int_input() -> List[List[int]]: + def _generate_int_input() -> List[List[int]]: return [[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]] - def __generate_int_expected() -> List[List[float]]: + def _generate_int_expected() -> List[List[float]]: return [[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]] - def __generate_float_input() -> List[List[float]]: + def _generate_float_input() -> List[List[float]]: return [ [285.3538, 185.5758, 1193.5110, 851.4551], [285.1472, 188.7374, 1192.4984, 851.0669], [279.2440, 197.9812, 1189.4746, 849.2019], ] - def __generate_float_expected() -> List[List[float]]: + def _generate_float_expected() -> List[List[float]]: return [[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]] @pytest.mark.parametrize( "test_input, dtypes, tolerance, expected", [ pytest.param( - __generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, __generate_int_expected() + _generate_int_input(), [torch.int16, torch.int32, torch.int64], 1e-4, _generate_int_expected() ), - pytest.param(__generate_float_input(), [torch.float16], 0.002, __generate_float_expected()), - 
pytest.param(__generate_float_input(), [torch.float32, torch.float64], 0.001, __generate_float_expected()), + pytest.param(_generate_float_input(), [torch.float16], 0.002, _generate_float_expected()), + pytest.param(_generate_float_input(), [torch.float32, torch.float64], 0.001, _generate_float_expected()), ], ) def test_gen_iou(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None:
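
For readers who want to try the pattern outside torchvision's test suite, below is a minimal, self-contained sketch of the base-class plus @pytest.mark.parametrize structure that the patches above converge on. The toy `area` function and the `TestToyArea` class are illustrative stand-ins only (they are not part of this PR); the real tests exercise ops.box_area, ops.box_iou, and ops.generalized_box_iou from torchvision.

    # sketch_parametrize_pattern.py -- illustrative only, not part of the patches above.
    # A toy `area` stands in for torchvision's ops.box_area so the file runs on its own.
    from abc import ABC, abstractmethod
    from typing import Callable, List, Tuple

    import pytest
    import torch
    from torch import Tensor


    def area(boxes: Tensor) -> Tensor:
        # (x2 - x1) * (y2 - y1) for boxes in xyxy format
        return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


    class BoxTestBase(ABC):
        @abstractmethod
        def _target_fn(self) -> Tuple[bool, Callable]:
            ...

        def _perform_box_operation(self, box: Tensor, run_as_script: bool = False) -> Tensor:
            # Unary ops take one box tensor; binary (IoU-style) ops take it twice.
            is_binary_fn, target_fn = self._target_fn()
            op = torch.jit.script(target_fn) if run_as_script else target_fn
            return op(box, box) if is_binary_fn else op(box)

        def _run_test(self, test_input: List, dtypes: List[torch.dtype], tolerance: float, expected: List) -> None:
            # The dtype loop lives here, so one pytest.param row covers several dtypes.
            for dtype in dtypes:
                out = self._perform_box_operation(torch.tensor(test_input, dtype=dtype))
                torch.testing.assert_close(
                    out, torch.tensor(expected), rtol=0.0, check_dtype=False, atol=tolerance
                )


    class TestToyArea(BoxTestBase):
        def _target_fn(self) -> Tuple[bool, Callable]:
            return (False, area)

        @pytest.mark.parametrize(
            "test_input, dtypes, tolerance, expected",
            [pytest.param([[0, 0, 100, 100], [0, 0, 0, 0]], [torch.int32, torch.float32], 1e-4, [10000, 0])],
        )
        def test_area(self, test_input, dtypes, tolerance, expected):
            self._run_test(test_input, dtypes, tolerance, expected)

As in the PR, each pytest.param row becomes a single collected test case, and the dtype sweep happens inside _run_test rather than in the parametrization itself.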