104 changes: 104 additions & 0 deletions tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
@@ -7,6 +7,7 @@

import ignite.distributed as idist
from ignite.contrib.metrics.regression import ManhattanDistance
from ignite.engine import Engine


def test_wrong_input_shapes():
@@ -63,6 +64,51 @@ def test_mahattan_distance():
assert manhattan.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)


def test_integration():
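    # Run the metric through an Engine with batched updates and compare the result
    # against the reference manhattan DistanceMetric pairwise computation.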
def _test(y_pred, y, batch_size):
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)

engine = Engine(update_fn)

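        # update_fn returns (idx, y_pred, y_true); the output_transform keeps only the two tensors.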
m = ManhattanDistance(output_transform=lambda x: (x[1], x[2]))
m.attach(engine, "md")

np_y = y.numpy()
np_y_pred = y_pred.numpy()

manhattan = DistanceMetric.get_metric("manhattan")

data = list(range(y_pred.shape[0] // batch_size))
md = engine.run(data, max_epochs=1).metrics["md"]

assert manhattan.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(md)

def get_test_cases():
test_cases = [
(torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
(torch.rand(size=(200,)), torch.rand(size=(200,)), 10),
(torch.rand(size=(100,)), torch.rand(size=(100,)), 20),
(torch.rand(size=(200,)), torch.rand(size=(200,)), 20),
]
return test_cases

for _ in range(10):
        # check multiple random inputs, as exact random matches are rare
test_cases = get_test_cases()
for y_pred, y, batch_size in test_cases:
_test(y_pred, y, batch_size)


def test_error_is_not_nan():
m = ManhattanDistance()
m.update((torch.zeros(4), torch.zeros(4)))
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors


def _test_distrib_compute(device):
rank = idist.get_rank()

@@ -93,12 +139,64 @@ def _test(metric_device):
_test(idist.device())


def _test_distrib_integration(device):

rank = idist.get_rank()
torch.manual_seed(12)

manhattan = DistanceMetric.get_metric("manhattan")

def _test(n_epochs, metric_device):
metric_device = torch.device(metric_device)
n_iters = 80
s = 16
n_classes = 2

offset = n_iters * s
y_true = torch.rand(size=(offset * idist.get_world_size(),)).to(device)
y_preds = torch.rand(size=(offset * idist.get_world_size(),)).to(device)

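        # each rank consumes its own offset-sized slice of the shared tensors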
def update(engine, i):
return (
y_preds[i * s + rank * offset : (i + 1) * s + rank * offset],
y_true[i * s + rank * offset : (i + 1) * s + rank * offset],
)

engine = Engine(update)

m = ManhattanDistance(device=metric_device)
m.attach(engine, "md")

data = list(range(n_iters))
engine.run(data=data, max_epochs=n_epochs)

assert "md" in engine.state.metrics

res = engine.state.metrics["md"]
if isinstance(res, torch.Tensor):
res = res.cpu().numpy()

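        # the attached metric is expected to aggregate over all ranks, so compare against the full arrays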
np_y_true = y_true.cpu().numpy()
np_y_preds = y_preds.cpu().numpy()

assert pytest.approx(res) == manhattan.pairwise([np_y_preds, np_y_true])[0][1]

metric_devices = ["cpu"]
if device.type != "xla":
metric_devices.append(idist.device())
for metric_device in metric_devices:
for _ in range(2):
_test(n_epochs=1, metric_device=metric_device)
_test(n_epochs=2, metric_device=metric_device)


@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(distributed_context_single_node_nccl):
device = torch.device(f"cuda:{distributed_context_single_node_nccl['local_rank']}")
_test_distrib_compute(device)
_test_distrib_integration(device)


@pytest.mark.distributed
@@ -107,6 +205,7 @@ def test_distrib_cpu(distributed_context_single_node_gloo):

device = torch.device("cpu")
_test_distrib_compute(device)
_test_distrib_integration(device)


@pytest.mark.distributed
@@ -118,6 +217,7 @@ def test_distrib_hvd(gloo_hvd_executor):
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()

gloo_hvd_executor(_test_distrib_compute, (device,), np=nproc, do_init=True)
gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)


@pytest.mark.multinode_distributed
@@ -126,6 +226,7 @@ def test_distrib_hvd(gloo_hvd_executor):
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = torch.device("cpu")
_test_distrib_compute(device)
_test_distrib_integration(device)


@pytest.mark.multinode_distributed
@@ -134,6 +235,7 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
_test_distrib_compute(device)
_test_distrib_integration(device)


@pytest.mark.tpu
@@ -142,11 +244,13 @@ def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
def test_distrib_single_device_xla():
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)


def _test_distrib_xla_nprocs(index):
device = idist.device()
_test_distrib_compute(device)
_test_distrib_integration(device)


@pytest.mark.tpu