Merged
28 commits
ec1ba33 cov: add test for ignite.contrib.engines (Mar 17, 2021)
01fcefa Merge remote-tracking branch 'upstream/master' into common-cov (Mar 17, 2021)
1cb62f7 fix test (Mar 17, 2021)
d0c617d use shutil.move (Mar 17, 2021)
1f7d613 Update tests/ignite/contrib/engines/test_common.py (Mar 18, 2021)
f750880 Merge branch 'master' into common-cov (Mar 18, 2021)
3afb9d7 Merge branch 'master' into common-cov (Mar 18, 2021)
1f4e89a Merge branch 'master' into common-cov (sdesrozis, Mar 18, 2021)
f36816f Merge branch 'master' into common-cov (Mar 19, 2021)
91de432 Merge branch 'master' into common-cov (Mar 19, 2021)
2af8ede Merge branch 'master' into common-cov (vfdev-5, Mar 19, 2021)
839cca4 Merge branch 'master' into common-cov (vfdev-5, Mar 21, 2021)
5064ec0 Merge branch 'master' into common-cov (Mar 24, 2021)
5f33cae fix: flake8 unused imports terminate_on_nan (Mar 24, 2021)
fe7f415 fix: patch.dict for failing test_no_pynvml_package test (Mar 24, 2021)
d3fb718 Merge remote-tracking branch 'upstream/master' into common-cov (Mar 25, 2021)
11057fb fix: remove py36 checks (Mar 25, 2021)
68d11ad fix: skipif tests (Mar 25, 2021)
fa02453 Merge branch 'master' into common-cov (Mar 26, 2021)
415c4db Merge branch 'master' into common-cov (Mar 27, 2021)
06d38be Merge branch 'master' into common-cov (Apr 1, 2021)
cb0fcfa Merge branch 'master' into common-cov (vfdev-5, Apr 4, 2021)
20eb936 Merge branch 'master' into common-cov (Apr 7, 2021)
bd6d236 Merge remote-tracking branch 'upstream/master' into common-cov (Apr 11, 2021)
3c50e0d test: debug failing test (Apr 11, 2021)
dc0ec68 autopep8 fix (ydcjeff, Apr 11, 2021)
dbf1174 test: debug failing test [skip netlify] (Apr 11, 2021)
a661ad7 fix: change 10 -> 16 (Apr 11, 2021)
46 changes: 43 additions & 3 deletions tests/ignite/contrib/engines/test_common.py
@@ -39,7 +39,14 @@ def forward(self, x):
 
 
 def _test_setup_common_training_handlers(
-    dirname, device, rank=0, local_rank=0, distributed=False, lr_scheduler=None, save_handler=None
+    dirname,
+    device,
+    rank=0,
+    local_rank=0,
+    distributed=False,
+    lr_scheduler=None,
+    save_handler=None,
+    output_transform=lambda loss: loss,
 ):
 
     lr = 0.01
@@ -74,7 +81,7 @@ def update_fn(engine, batch):
         loss = y_pred.mean()
         loss.backward()
         optimizer.step()
-        return loss
+        return output_transform(loss)
 
     train_sampler = None
     if distributed and idist.get_world_size() > 1:
@@ -141,6 +148,22 @@ def test_asserts_setup_common_training_handlers():
     train_sampler = MagicMock(spec=DistributedSampler)
     setup_common_training_handlers(trainer, train_sampler=train_sampler)
 
+    with pytest.raises(RuntimeError, match=r"This contrib module requires available GPU"):
+        setup_common_training_handlers(trainer, with_gpu_stats=True)
+
+    with pytest.raises(TypeError, match=r"Unhandled type of update_function's output."):
+        trainer = Engine(lambda e, b: None)
+        setup_common_training_handlers(
+            trainer,
+            output_names=["loss"],
+            with_pbar_on_iters=False,
+            with_pbars=False,
+            with_gpu_stats=False,
+            stop_on_nan=False,
+            clear_cuda_cache=False,
+        )
+        trainer.run([1])
+
 
 def test_no_warning_with_train_sampler(recwarn):
     from torch.utils.data import RandomSampler
@@ -165,7 +188,6 @@ def test_assert_setup_common_training_handlers_wrong_train_sampler(distributed_c
 
 
 def test_setup_common_training_handlers(dirname, capsys):
-
     _test_setup_common_training_handlers(dirname, device="cpu")
 
     # Check epoch-wise pbar
@@ -175,6 +197,24 @@ def test_setup_common_training_handlers(dirname, capsys):
     out = list(filter(None, out))
     assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
 
+    _test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: [loss])
+
+    # Check epoch-wise pbar
+    captured = capsys.readouterr()
+    out = captured.err.split("\r")
+    out = list(map(lambda x: x.strip(), out))
+    out = list(filter(None, out))
+    assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
+
+    _test_setup_common_training_handlers(dirname, device="cpu", output_transform=lambda loss: {"batch_loss": loss})
+
+    # Check epoch-wise pbar
+    captured = capsys.readouterr()
+    out = captured.err.split("\r")
+    out = list(map(lambda x: x.strip(), out))
+    out = list(filter(None, out))
+    assert "Epoch" in out[-1] or "Epoch" in out[-2], f"{out[-2]}, {out[-1]}"
+
 
 def test_setup_common_training_handlers_using_save_handler(dirname, capsys):
 
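Note: the TypeError branch tested above comes from the output_names machinery in ignite.contrib.engines.common, which accepts an update function returning a number/tensor, a sequence, or a mapping, and rejects anything else. A minimal sketch of that contract (setup_common_training_handlers and its keyword arguments are the real ignite API exercised in this diff; the toy update function and the "batch_loss" name are invented for illustration):

import torch
from ignite.contrib.engines.common import setup_common_training_handlers
from ignite.engine import Engine


def update_fn(engine, batch):
    # Stand-in for a real training step; a tensor, [tensor] or
    # {"batch_loss": tensor} are all accepted, anything else raises TypeError.
    return {"batch_loss": torch.tensor(0.1)}


trainer = Engine(update_fn)
setup_common_training_handlers(trainer, output_names=["batch_loss"], with_pbars=False)
trainer.run([0] * 4, max_epochs=2)
print(trainer.state.metrics["batch_loss"])  # running average of the named output

Each name in output_names is attached to the trainer as a running average, which is what the new output_transform variants of _test_setup_common_training_handlers exercise.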
8 changes: 4 additions & 4 deletions tests/ignite/contrib/metrics/test_average_precision.py
@@ -91,9 +91,9 @@ def get_test_cases():
         (torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 1),
         (torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 1),
         # updated batches
-        (torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10,)).long(), 16),
-        (torch.randint(0, 2, size=(16,)).long(), torch.randint(0, 2, size=(16,)).long(), 16),
-        (torch.randint(0, 2, size=(100,)).long(), torch.randint(0, 2, size=(100,)).long(), 16),
+        (torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 1)).long(), 16),
+        (torch.randint(0, 2, size=(16, 1)).long(), torch.randint(0, 2, size=(16, 1)).long(), 16),
+        (torch.randint(0, 2, size=(100, 1)).long(), torch.randint(0, 2, size=(100, 1)).long(), 16),
     ]
     return test_cases
@@ -134,7 +134,7 @@ def get_test_cases():
         (torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 1),
         (torch.randint(0, 2, size=(200, 6)).long(), torch.randint(0, 2, size=(200, 6)).long(), 1),
         # updated batches
-        (torch.randint(0, 2, size=(10, 4)).long(), torch.randint(0, 2, size=(10, 4)).long(), 16),
+        (torch.randint(0, 2, size=(16, 4)).long(), torch.randint(0, 2, size=(16, 4)).long(), 16),
         (torch.randint(0, 2, size=(50, 7)).long(), torch.randint(0, 2, size=(50, 7)).long(), 16),
         (torch.randint(0, 2, size=(100, 4)).long(), torch.randint(0, 2, size=(100, 4)).long(), 16),
         (torch.randint(0, 2, size=(200, 6)).long(), torch.randint(0, 2, size=(200, 6)).long(), 16),
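For context, the third element of each tuple is the batch size the test harness uses to feed the metric in chunks (an assumption inferred from the tuples above; the harness itself is not shown in this diff). A minimal sketch of that chunked-update pattern with ignite's AveragePrecision (requires scikit-learn; the shapes mirror the updated cases):

import torch
from ignite.contrib.metrics import AveragePrecision

y_pred = torch.rand(100, 1)  # probability-like scores
y_true = torch.randint(0, 2, size=(100, 1)).long()
batch_size = 16

ap = AveragePrecision()  # wraps sklearn.metrics.average_precision_score
for i in range(0, y_true.shape[0], batch_size):  # the last chunk is smaller than 16
    ap.update((y_pred[i : i + batch_size], y_true[i : i + batch_size]))
print(ap.compute())

Choosing sample counts such as 16 alongside 10, 50, and 100 covers both the exactly-one-batch path and the uneven-final-batch path.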
35 changes: 6 additions & 29 deletions tests/ignite/contrib/metrics/test_gpu_info.py
@@ -1,4 +1,3 @@
-import sys
 from unittest.mock import Mock, patch
 
 import pytest
@@ -7,35 +6,15 @@
 from ignite.contrib.metrics import GpuInfo
 from ignite.engine import Engine, State
 
 
-python_below_36 = (sys.version[0] == "3" and int(sys.version[2]) < 6) or int(sys.version[0]) < 2
-
-
-@pytest.fixture
-def no_site_packages():
-    import sys
-
-    import pynvml  # noqa: F401
-
-    assert "pynvml" in sys.modules
-    pynvml_module = sys.modules["pynvml"]
-    del sys.modules["pynvml"]
-    prev_path = list(sys.path)
-    sys.path = [p for p in sys.path if "site-packages" not in p]
-    yield "no_site_packages"
-    sys.path = prev_path
-    sys.modules["pynvml"] = pynvml_module
-
-
-@pytest.mark.skipif(python_below_36, reason="No pynvml for python < 3.6")
-def test_no_pynvml_package(no_site_packages):
-
-    with pytest.raises(RuntimeError, match="This contrib module requires pynvml to be installed."):
-        GpuInfo()
+def test_no_pynvml_package():
+    with patch.dict("sys.modules", {"pynvml.smi": None}):
+        with pytest.raises(RuntimeError, match="This contrib module requires pynvml to be installed."):
+            GpuInfo()
 
 
-@pytest.mark.skipif(python_below_36 or torch.cuda.is_available(), reason="No pynvml for python < 3.6")
+@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
 def test_no_gpu():
 
     with pytest.raises(RuntimeError, match="This contrib module requires available GPU"):
         GpuInfo()
@@ -79,7 +58,7 @@ def _test_gpu_info(device="cpu"):
     assert "gpu:0 util(%)" not in engine.state.metrics
 
 
-@pytest.mark.skipif(python_below_36 or not (torch.cuda.is_available()), reason="No pynvml for python < 3.6 and no GPU")
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
 def test_gpu_info_on_cuda():
     _test_gpu_info(device="cuda")
 
@@ -89,7 +68,6 @@ def test_gpu_info_on_cuda():
 
 @pytest.fixture
 def mock_pynvml_module():
-
     with patch.dict(
         "sys.modules",
         {
@@ -115,7 +93,6 @@ def getInstance():
 
 @pytest.fixture
 def mock_gpu_is_available():
-
     with patch("ignite.contrib.metrics.gpu_info.torch.cuda") as mock_cuda:
         mock_cuda.is_available.return_value = True
         yield mock_cuda
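The replacement of the old no_site_packages fixture (per commit fe7f415) relies on standard unittest.mock behavior: setting a module's entry in sys.modules to None makes any import of it raise ImportError, with no sys.path surgery. A standalone sketch of the technique, which runs whether or not pynvml is installed:

from unittest.mock import patch

with patch.dict("sys.modules", {"pynvml.smi": None}):
    try:
        from pynvml.smi import nvidia_smi  # noqa: F401
    except ImportError as e:  # a None entry in sys.modules blocks the import
        print("import blocked:", e)

Because patch.dict restores sys.modules on exit, the fake "missing package" state cannot leak into other tests the way the old path-stripping fixture could.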