Add opchecks for RoiAlign #8057

Merged · 4 commits · Oct 23, 2023
13 changes: 12 additions & 1 deletion test/optests_failures_dict.json
@@ -1,5 +1,16 @@
 {
   "_description": "This is a dict containing failures for tests autogenerated by generate_opcheck_tests. For more details, please see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit",
   "_version": 1,
-  "data": {}
+  "data": {
+    "torchvision::roi_align": {
+      "TestRoIAlign.test_aot_dispatch_dynamic__test_mps_error_inputs": {
+        "comment": "RuntimeError: MPS does not support roi_align backward with float16 inputs",
+        "status": "xfail"
+      },
+      "TestRoIAlign.test_autograd_registration__test_mps_error_inputs": {
+        "comment": "NotImplementedError: autograd_registration_check: NYI devices other than CPU/CUDA, got {'mps'}",
+        "status": "xfail"
+      }
+    }
+  }
 }
Member Author commented:
I'm leaving the mps-related stuff as xfail for now, we can fix them eventually in other PRs.

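For context, each key under `data` follows the pattern `<TestClass>.<check>__<original_test>`: e.g. `TestRoIAlign.test_autograd_registration__test_mps_error_inputs` is the autograd-registration check that `generate_opcheck_tests` derives from the existing `test_mps_error_inputs` test. A minimal sketch for listing the expected failures (not part of this PR; assumes it is run from the repo root):

```python
import json
import os

# Load the failures dict added in this PR and summarize its entries.
path = os.path.join("test", "optests_failures_dict.json")
with open(path) as f:
    failures = json.load(f)

for op_name, tests in failures["data"].items():
    for test_name, info in tests.items():
        # Keys look like "<TestClass>.<check>__<original_test>".
        print(f"{op_name} / {test_name}: {info['status']} ({info['comment']})")
```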
13 changes: 13 additions & 0 deletions test/test_ops.py
@@ -474,6 +474,7 @@ def test_boxes_shape(self):
     @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64))  # , ids=str)
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -491,6 +492,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
     @pytest.mark.parametrize("deterministic", (True, False))
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
+    @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
         with torch.cuda.amp.autocast():
             self.test_forward(
@@ -506,6 +508,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_backward(self, seed, device, contiguous, deterministic):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -520,6 +523,7 @@ def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 10), (0.1, 50)))
     @pytest.mark.parametrize("qdtype", (torch.qint8, torch.quint8, torch.qint32))
+    @pytest.mark.opcheck_only_one()
     def test_qroialign(self, aligned, scale, zero_point, qdtype):
         """Make sure quantized version of RoIAlign is close to float version"""
         pool_size = 5
@@ -589,6 +593,15 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)
 
 
+optests.generate_opcheck_tests(
+    testcase=TestRoIAlign,
+    namespaces=["torchvision"],
+    failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
+
+
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2
 
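The `optests.generate_opcheck_tests` call above generates, for each existing `TestRoIAlign` test, one variant per check in `OPTESTS` (e.g. `test_autograd_registration` and `test_aot_dispatch_dynamic`, as the failure names in the JSON show); the `@pytest.mark.opcheck_only_one()` marks appear intended to keep the generated variants from running over every sample of these heavily parametrized tests. In recent PyTorch releases the same checks can be reproduced one-off through the public `torch.library.opcheck` API; a minimal sketch, not part of this PR, with hypothetical shapes and pooling parameters:

```python
import torch
import torchvision  # noqa: F401  -- importing torchvision registers torchvision::roi_align

x = torch.randn(2, 3, 16, 16, requires_grad=True)
# Each roi row is (batch_index, x1, y1, x2, y2).
rois = torch.tensor([[0.0, 1.0, 1.0, 10.0, 10.0]])

# Schema: roi_align(input, rois, spatial_scale, pooled_height,
#                   pooled_width, sampling_ratio, aligned)
torch.library.opcheck(
    torch.ops.torchvision.roi_align,
    args=(x, rois, 0.5, 7, 7, 2, True),
)
```

A failing check raises the underlying error, which is the kind of message recorded in the `comment` fields of the failures dict above.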