From 912330533a73a3ff6cb7c088018a37d07585b132 Mon Sep 17 00:00:00 2001
From: Moh-Yakoub
Date: Mon, 22 Feb 2021 22:21:25 +0000
Subject: [PATCH] Parameterize some metric tests

---
 tests/ignite/metrics/test_fbeta.py | 46 +++++++++++++++++-------------
 tests/ignite/metrics/test_loss.py  | 29 ++-----------------
 2 files changed, 29 insertions(+), 46 deletions(-)

diff --git a/tests/ignite/metrics/test_fbeta.py b/tests/ignite/metrics/test_fbeta.py
index 8e38d516140c..80da6682fce8 100644
--- a/tests/ignite/metrics/test_fbeta.py
+++ b/tests/ignite/metrics/test_fbeta.py
@@ -12,26 +12,32 @@
 torch.manual_seed(12)
 
 
-def test_wrong_inputs():
-
-    with pytest.raises(ValueError, match=r"Beta should be a positive integer"):
-        Fbeta(0.0)
-
-    with pytest.raises(ValueError, match=r"Input precision metric should have average=False"):
-        p = Precision(average=True)
-        Fbeta(1.0, precision=p)
-
-    with pytest.raises(ValueError, match=r"Input recall metric should have average=False"):
-        r = Recall(average=True)
-        Fbeta(1.0, recall=r)
-
-    with pytest.raises(ValueError, match=r"If precision argument is provided, output_transform should be None"):
-        p = Precision(average=False)
-        Fbeta(1.0, precision=p, output_transform=lambda x: x)
-
-    with pytest.raises(ValueError, match=r"If recall argument is provided, output_transform should be None"):
-        r = Recall(average=False)
-        Fbeta(1.0, recall=r, output_transform=lambda x: x)
+@pytest.mark.parametrize(
+    "match, fbeta, precision, recall, output_transform",
+    [
+        ("Beta should be a positive integer", 0.0, None, None, None),
+        ("Input precision metric should have average=False", 1.0, Precision(average=True), None, None),
+        ("Input recall metric should have average=False", 1.0, None, Recall(average=True), None),
+        (
+            "If precision argument is provided, output_transform should be None",
+            1.0,
+            Precision(average=False),
+            None,
+            lambda x: x,
+        ),
+        (
+            "If recall argument is provided, output_transform should be None",
+            1.0,
+            None,
+            Recall(average=False),
+            lambda x: x,
+        ),
+    ],
+)
+def test_wrong_inputs(match, fbeta, precision, recall, output_transform):
+
+    with pytest.raises(ValueError, match=fr"{match}"):
+        Fbeta(fbeta, precision=precision, recall=recall, output_transform=output_transform)
 
 
 def test_integration():
diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py
index 592824b1b02e..0511bbbcc93a 100644
--- a/tests/ignite/metrics/test_loss.py
+++ b/tests/ignite/metrics/test_loss.py
@@ -17,22 +17,9 @@ def test_zero_div():
         loss.compute()
 
 
-def test_compute():
-    loss = Loss(nll_loss)
-
-    y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]]).log()
-    y = torch.tensor([2, 2]).long()
-    loss.update((y_pred, y))
-    assert_almost_equal(loss.compute(), 1.1512925625)
-
-    y_pred = torch.tensor([[0.1, 0.3, 0.6], [0.6, 0.2, 0.2], [0.2, 0.7, 0.1]]).log()
-    y = torch.tensor([2, 0, 2]).long()
-    loss.update((y_pred, y))
-    assert_almost_equal(loss.compute(), 1.1253643036)  # average
-
-
-def test_compute_on_criterion():
-    loss = Loss(nn.NLLLoss())
+@pytest.mark.parametrize("loss_function", [nll_loss, nn.NLLLoss()])
+def test_compute(loss_function):
+    loss = Loss(loss_function)
 
     y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]]).log()
     y = torch.tensor([2, 2]).long()
@@ -135,16 +122,6 @@ def _test_distrib_accumulator_device(device):
     ), f"{type(loss._sum.device)}:{loss._sum.device} vs {type(metric_device)}:{metric_device}"
 
 
-def test_sum_detached():
-    loss = Loss(nll_loss)
-
-    y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]], requires_grad=True).log()
-    y = torch.tensor([2, 2]).long()
-    loss.update((y_pred, y))
-
-    assert not loss._sum.requires_grad
-
-
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
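
Note on the pattern: both hunks fold several near-identical test functions into a single
test decorated with pytest.mark.parametrize, so each input combination is collected and
reported as its own case. The sketch below shows the same pattern in isolation; it is a
minimal illustration, not part of the ignite test suite, and the scale() function, its
error message, and the test values are made-up assumptions.

import pytest


def scale(value, factor):
    # Toy function under test; rejects non-positive factors.
    if factor <= 0:
        raise ValueError("factor should be positive")
    return value * factor


@pytest.mark.parametrize(
    "value, factor, expected",
    [(2, 3, 6), (0, 5, 0), (-1, 2, -2)],
)
def test_scale(value, factor, expected):
    # Each tuple becomes an independently reported test case.
    assert scale(value, factor) == expected


@pytest.mark.parametrize("bad_factor", [0, -1])
def test_scale_wrong_inputs(bad_factor):
    # Same idea as the Fbeta/Loss changes: one test body, many invalid inputs.
    with pytest.raises(ValueError, match=r"factor should be positive"):
        scale(2, bad_factor)

Running pytest on this file yields five separately reported cases (three valid inputs,
two invalid ones) instead of two monolithic tests.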