diff --git a/tests/ignite/contrib/engines/test_common.py b/tests/ignite/contrib/engines/test_common.py
index fcdc3831e1ba..292a3ab553dd 100644
--- a/tests/ignite/contrib/engines/test_common.py
+++ b/tests/ignite/contrib/engines/test_common.py
@@ -39,7 +39,14 @@ def forward(self, x):
 
 
 def _test_setup_common_training_handlers(
-    dirname, device, rank=0, local_rank=0, distributed=False, lr_scheduler=None, save_handler=None
+    dirname,
+    device,
+    rank=0,
+    local_rank=0,
+    distributed=False,
+    lr_scheduler=None,
+    save_handler=None,
+    output_transform=lambda loss: loss,
 ):
 
     lr = 0.01
@@ -74,7 +81,7 @@ def update_fn(engine, batch):
         loss = y_pred.mean()
         loss.backward()
         optimizer.step()
-        return loss
+        return output_transform(loss)
 
     train_sampler = None
     if distributed and idist.get_world_size() > 1:
@@ -141,6 +148,22 @@ def test_asserts_setup_common_training_handlers():
         train_sampler = MagicMock(spec=DistributedSampler)
         setup_common_training_handlers(trainer, train_sampler=train_sampler)
 
+    with pytest.raises(RuntimeError, match=r"This contrib module requires available GPU"):
+        setup_common_training_handlers(trainer, with_gpu_stats=True)
+
+    with pytest.raises(TypeError, match=r"Unhandled type of update_function's output."):
+        trainer = Engine(lambda e, b: None)
+        setup_common_training_handlers(
+            trainer,
+            output_names=["loss"],
+            with_pbar_on_iters=False,
+            with_pbars=False,
+            with_gpu_stats=False,
+            stop_on_nan=False,
+            clear_cuda_cache=False,
+        )
+        trainer.run([1])
+
 
 def test_no_warning_with_train_sampler(recwarn):
     from torch.utils.data import RandomSampler
@@ -165,7 +188,6 @@ def test_assert_setup_common_training_handlers_wrong_train_sampler(distributed_c
 
 
 def test_setup_common_training_handlers(dirname, capsys):
-
     _test_setup_common_training_handlers(dirname, device="cpu")
 
     # Check epoch-wise pbar
@@ -175,6 +197,24 @@ def test_setup_common_training_handlers(dirname, capsys):
     out = list(filter(None, out))
     assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
 
+    _test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: [loss])
+
+    # Check epoch-wise pbar
+    captured = capsys.readouterr()
+    out = captured.err.split("\r")
+    out = list(map(lambda x: x.strip(), out))
+    out = list(filter(None, out))
+    assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
+
+    _test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: {"batch_loss": loss})
+
+    # Check epoch-wise pbar
+    captured = capsys.readouterr()
+    out = captured.err.split("\r")
+    out = list(map(lambda x: x.strip(), out))
+    out = list(filter(None, out))
+    assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
+
 
 def test_setup_common_training_handlers_using_save_handler(dirname, capsys):
 
diff --git a/tests/ignite/contrib/metrics/test_average_precision.py b/tests/ignite/contrib/metrics/test_average_precision.py
index 0683663fbd68..a51dc0e62a86 100644
--- a/tests/ignite/contrib/metrics/test_average_precision.py
+++ b/tests/ignite/contrib/metrics/test_average_precision.py
@@ -91,9 +91,9 @@ def get_test_cases():
             (torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
             (torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 1),
             # updated batches
-            (torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 16),
+            (torch.randint(0, 2, size=(16,)).long(), torch.randint(0, 2, size=(16,)).long(), 16),
             (torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 16),
-            (torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 16),
+            (torch.randint(0, 2, size=(16, 1)).long(), torch.randint(0, 2, size=(16, 1)).long(), 16),
             (torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 16),
         ]
         return test_cases
@@ -134,7 +134,7 @@ def get_test_cases():
             (torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 1),
             (torch.randint(0, 2, size=(200, 6)).long(), torch.randint(0, 2, size=(200, 6)).long(), 1),
             # updated batches
-            (torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 16),
+            (torch.randint(0, 2, size=(16, 4)).long(), torch.randint(0, 2, size=(16, 4)).long(), 16),
             (torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
             (torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 16),
             (torch.randint(0, 2, size=(200, 6)).long(), torch.randint(0, 2, size=(200, 6)).long(), 16),
diff --git a/tests/ignite/contrib/metrics/test_gpu_info.py b/tests/ignite/contrib/metrics/test_gpu_info.py
index b814cd9f7d49..97ee72b83905 100644
--- a/tests/ignite/contrib/metrics/test_gpu_info.py
+++ b/tests/ignite/contrib/metrics/test_gpu_info.py
@@ -1,4 +1,3 @@
-import sys
 from unittest.mock import Mock, patch
 
 import pytest
@@ -7,35 +6,15 @@
 from ignite.contrib.metrics import GpuInfo
 from ignite.engine import Engine, State
 
-python_below_36 = (sys.version[0] == "3" and int(sys.version[2]) < 6) or int(sys.version[0]) < 2
 
+def test_no_pynvml_package():
+    with patch.dict("sys.modules", {"pynvml.smi": None}):
+        with pytest.raises(RuntimeError, match="This contrib module requires pynvml to be installed."):
+            GpuInfo()
 
-@pytest.fixture
-def no_site_packages():
-    import sys
-
-    import pynvml  # noqa: F401
-
-    assert "pynvml" in sys.modules
-    pynvml_module = sys.modules["pynvml"]
-    del sys.modules["pynvml"]
-    prev_path = list(sys.path)
-    sys.path = [p for p in sys.path if "site-packages" not in p]
-    yield "no_site_packages"
-    sys.path = prev_path
-    sys.modules["pynvml"] = pynvml_module
 
-
-@pytest.mark.skipif(python_below_36, reason="No pynvml for python < 3.6")
-def test_no_pynvml_package(no_site_packages):
-
-    with pytest.raises(RuntimeError, match="This contrib module requires pynvml to be installed."):
-        GpuInfo()
-
-
-@pytest.mark.skipif(python_below_36 or torch.cuda.is_available(), reason="No pynvml for python < 3.6")
+@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
 def test_no_gpu():
-
     with pytest.raises(RuntimeError, match="This contrib module requires available GPU"):
         GpuInfo()
 
@@ -79,7 +58,7 @@ def _test_gpu_info(device="cpu"):
     assert "gpu:0 util(%)" not in engine.state.metrics
 
 
-@pytest.mark.skipif(python_below_36 or not (torch.cuda.is_available()), reason="No pynvml for python < 3.6 and no GPU")
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
 def test_gpu_info_on_cuda():
     _test_gpu_info(device="cuda")
 
@@ -89,7 +68,6 @@
 
 @pytest.fixture
 def mock_pynvml_module():
-
     with patch.dict(
         "sys.modules",
         {
@@ -115,7 +93,6 @@ def getInstance():
 
 @pytest.fixture
 def mock_gpu_is_available():
-
     with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda:
         mock_cuda.is_available.return_value = True
         yield mock_cuda
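For reference only, a minimal sketch (not part of the patch) of the behaviour the new `output_names` / `output_transform` tests exercise. It assumes `setup_common_training_handlers` attaches one running-average metric per entry in `output_names` and accepts a tensor, a list/tuple, or a dict from the update function; the `batch_loss` name, the dummy data, and the constant loss value are illustrative.

```python
# Illustrative only -- assumes setup_common_training_handlers registers one
# running-average metric per entry in `output_names` and that the update
# function may return a tensor, a list/tuple, or a dict keyed by those names.
import torch

from ignite.contrib.engines.common import setup_common_training_handlers
from ignite.engine import Engine


def update_fn(engine, batch):
    # Dict output; a bare tensor or a list such as [loss] is also accepted.
    return {"batch_loss": torch.tensor(0.5)}


trainer = Engine(update_fn)
setup_common_training_handlers(
    trainer,
    output_names=["batch_loss"],
    with_pbars=False,       # keep the sketch silent (no tqdm progress bars)
    with_gpu_stats=False,   # avoid the pynvml / GPU requirement
    stop_on_nan=False,
    clear_cuda_cache=False,
)
trainer.run([0] * 4, max_epochs=1)
assert "batch_loss" in trainer.state.metrics
```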