From dbbc51c7ba7df2276f93aafe752ff09d11aa59bf Mon Sep 17 00:00:00 2001
From: LukasHedegaard
Date: Sun, 11 Jul 2021 19:39:58 +0200
Subject: [PATCH] Loosen confidence in test_(co)x3d_learner

---
 .../activity_recognition/cox3d/test_cox3d_learner.py        | 4 ++--
 .../perception/activity_recognition/x3d/test_x3d_learner.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py
index 02fdaff7f3..1ee1cb3671 100644
--- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py
+++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py
@@ -109,11 +109,11 @@ def test_infer(self):
 
         # Input is Image
         results2 = self.learner.infer(Image(batch[0], dtype=np.float))
-        assert torch.allclose(results1[0].confidence, results2[0].confidence)
+        assert torch.allclose(results1[0].confidence, results2[0].confidence, atol=1e-6)
 
         # Input is List[Image]
         results3 = self.learner.infer([Image(v, dtype=np.float) for v in batch])
-        assert all([torch.allclose(r1.confidence, r3.confidence) for (r1, r3) in zip(results1, results3)])
+        assert all([torch.allclose(r1.confidence, r3.confidence, atol=1e-6) for (r1, r3) in zip(results1, results3)])
 
     def test_optimize(self):
         self.learner.ort_session = None
diff --git a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py
index 9eb29f9c78..b4e9d03e9a 100644
--- a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py
+++ b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py
@@ -107,12 +107,12 @@ def test_infer(self):
 
         # Input is Video
         results2 = self.learner.infer(Video(batch[0]))
         assert results1[0].data == results2[0].data
-        assert torch.allclose(results1[0].confidence, results2[0].confidence)
+        assert torch.allclose(results1[0].confidence, results2[0].confidence, atol=1e-6)
 
         # Input is List[Video]
         results3 = self.learner.infer([Video(v) for v in batch])
         assert all([
-            r1.data == r3.data and torch.allclose(r1.confidence, r3.confidence)
+            r1.data == r3.data and torch.allclose(r1.confidence, r3.confidence, atol=1e-6)
             for (r1, r3) in zip(results1, results3)
         ])
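
For context (not part of the patch itself): torch.allclose(input, other, rtol=1e-05, atol=1e-08) passes when |input - other| <= atol + rtol * |other| elementwise, so raising atol from the default 1e-08 to 1e-6 absorbs the tiny floating-point discrepancies that can arise between the single-sample and batched inference paths being compared in these tests. The snippet below is a minimal illustrative sketch of that effect; the tensor values are made up and are not taken from the learner tests.

import torch

# Two "confidence"-like tensors with small magnitudes (where rtol contributes
# little) that differ by roughly 1e-7, e.g. due to a different reduction order
# in two otherwise equivalent inference paths.
a = torch.tensor([1.0e-4, 2.0e-4])
b = a + 1e-7

print(torch.allclose(a, b))             # False: default atol=1e-08 is too strict here
print(torch.allclose(a, b, atol=1e-6))  # True: the loosened tolerance absorbs the noise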