Update test for CosineSimilarity Metric #3218

Merged on Mar 25, 2024 (29 commits)

Commits
c7f0a1a  add cosine similarity (kzkadc, Mar 14, 2024)
9c87580  update doc for cosine similarity metric (kzkadc, Mar 14, 2024)
12a4d56  fix the position of the CosineSimilarity (kzkadc, Mar 14, 2024)
e09b86f  Update ignite/contrib/metrics/cosine_similarity.py (kzkadc, Mar 18, 2024)
99852d5  autopep8 fix (kzkadc, Mar 18, 2024)
54c16c1  move CosineSimilarity from contrib.metrics to metrics (kzkadc, Mar 18, 2024)
9524d20  autopep8 fix (kzkadc, Mar 18, 2024)
b7dd7ea  fix typo (kzkadc, Mar 18, 2024)
ef33b49  Merge branch 'cosine_similarity' of github.com:kzkadc/ignite into cos… (kzkadc, Mar 18, 2024)
52aeaee  fix typo (kzkadc, Mar 18, 2024)
78b4029  Update ignite/metrics/cosine_similarity.py (kzkadc, Mar 18, 2024)
6bfbe88  autopep8 fix (kzkadc, Mar 18, 2024)
a83e02c  Update ignite/metrics/cosine_similarity.py (kzkadc, Mar 18, 2024)
9e4fd31  fix formatting (kzkadc, Mar 18, 2024)
377baea  autopep8 fix (kzkadc, Mar 18, 2024)
87d2854  Merge branch 'cosine_similarity' of github.com:kzkadc/ignite into cos… (kzkadc, Mar 18, 2024)
ad16ce7  fix formatting (kzkadc, Mar 18, 2024)
6d3193e  autopep8 fix (kzkadc, Mar 18, 2024)
b2b8de4  add test for CosineSimilarity metric (kzkadc, Mar 18, 2024)
8156dc6  autopep8 fix (kzkadc, Mar 18, 2024)
0cd8440  Merge branch 'master' into cosine_similarity (vfdev-5, Mar 19, 2024)
0778d07  Merge branch 'master' into cosine_similarity (vfdev-5, Mar 19, 2024)
d05f854  fix error when testing in xla (kzkadc, Mar 20, 2024)
eff647f  Merge branch 'master' into cosine_similarity (kzkadc, Mar 20, 2024)
723e2d9  Merge branch 'master' into cosine_similarity (kzkadc, Mar 23, 2024)
4a883ba  update test for CosineSimilarity metric (kzkadc, Mar 23, 2024)
c903bcb  Merge branch 'master' into cosine_similarity (kzkadc, Mar 24, 2024)
3c73a22  update test for CosineSimilarity metric (kzkadc, Mar 24, 2024)
510b1cb  Merge branch 'master' into cosine_similarity (vfdev-5, Mar 24, 2024)
179 changes: 56 additions & 123 deletions tests/ignite/metrics/test_cosine_similarity.py
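For orientation before the diff: the test validates the metric against a NumPy reference that clips each row's L2 norm from below, normalizes the rows, takes row-wise dot products, and averages them over samples. A minimal standalone sketch of that reference computation, assuming only what the diff below shows (the function name is illustrative, not part of the PR):

import numpy as np

def reference_cosine_similarity(y_pred: np.ndarray, y: np.ndarray, eps: float = 1e-8) -> float:
    # Clip each row's L2 norm from below so zero vectors cannot divide by zero.
    y_norm = np.clip(np.linalg.norm(y, axis=1, keepdims=True), eps, None)
    y_pred_norm = np.clip(np.linalg.norm(y_pred, axis=1, keepdims=True), eps, None)
    # Row-wise dot product of the normalized vectors, averaged over all samples.
    return float(np.mean(np.sum((y / y_norm) * (y_pred / y_pred_norm), axis=1)))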
@@ -1,10 +1,12 @@
-import os
+from typing import Tuple
 
 import numpy as np
 import pytest
 import torch
+from torch import Tensor
 
 import ignite.distributed as idist
+from ignite.engine import Engine
 from ignite.exceptions import NotComputableError
 from ignite.metrics import CosineSimilarity
 
@@ -17,7 +19,7 @@ def test_zero_sample():
         cos_sim.compute()
 
 
-@pytest.fixture(params=[item for item in range(4)])
+@pytest.fixture(params=list(range(4)))
 def test_case(request):
     return [
         (torch.randn((100, 50)), torch.randn((100, 50)), 10 ** np.random.uniform(-8, 0), 1),
@@ -39,7 +41,7 @@ def test_case(request):
 
 
 @pytest.mark.parametrize("n_times", range(5))
-def test_compute(n_times, test_case):
+def test_compute(n_times, test_case: Tuple[Tensor, Tensor, float, int]):
     y_pred, y, eps, batch_size = test_case
 
     cos = CosineSimilarity(eps=eps)
@@ -65,73 +67,6 @@ def test_compute(n_times, test_case):
     assert pytest.approx(np_res, rel=2e-5) == cos.compute()
 
 
-def _test_distrib_integration(device, tol=2e-5):
-    from ignite.engine import Engine
-
-    rank = idist.get_rank()
-    torch.manual_seed(12 + rank)
-
-    def _test(metric_device):
-        n_iters = 100
-        batch_size = 10
-        n_dims = 100
-
-        y_true = torch.randn((n_iters * batch_size, n_dims), dtype=torch.float).to(device)
-        y_preds = torch.normal(2.0, 3.0, size=(n_iters * batch_size, n_dims), dtype=torch.float).to(device)
-
-        def update(engine, i):
-            return (
-                y_preds[i * batch_size : (i + 1) * batch_size],
-                y_true[i * batch_size : (i + 1) * batch_size],
-            )
-
-        engine = Engine(update)
-
-        m = CosineSimilarity(device=metric_device)
-        m.attach(engine, "cosine_similarity")
-
-        data = list(range(n_iters))
-        engine.run(data=data, max_epochs=1)
-
-        y_preds = idist.all_gather(y_preds)
-        y_true = idist.all_gather(y_true)
-
-        assert "cosine_similarity" in engine.state.metrics
-        res = engine.state.metrics["cosine_similarity"]
-
-        y_true_np = y_true.cpu().numpy()
-        y_preds_np = y_preds.cpu().numpy()
-        y_true_norm = np.clip(np.linalg.norm(y_true_np, axis=1, keepdims=True), 1e-8, None)
-        y_preds_norm = np.clip(np.linalg.norm(y_preds_np, axis=1, keepdims=True), 1e-8, None)
-        true_res = np.sum((y_true_np / y_true_norm) * (y_preds_np / y_preds_norm), axis=1)
-        true_res = np.mean(true_res)
-
-        assert pytest.approx(res, rel=tol) == true_res
-
-    _test("cpu")
-    if device.type != "xla":
-        _test(idist.device())
-
-
-def _test_distrib_accumulator_device(device):
-    metric_devices = [torch.device("cpu")]
-    if device.type != "xla":
-        metric_devices.append(idist.device())
-    for metric_device in metric_devices:
-        device = torch.device(device)
-        cos = CosineSimilarity(device=metric_device)
-
-        for dev in [cos._device, cos._sum_of_cos_similarities.device]:
-            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
-
-        y_pred = torch.tensor([[2.0, 3.0], [-2.0, 1.0]], dtype=torch.float)
-        y = torch.ones(2, 2, dtype=torch.float)
-        cos.update((y_pred, y))
-
-        for dev in [cos._device, cos._sum_of_cos_similarities.device]:
-            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
-
-
 def test_accumulator_detached():
     cos = CosineSimilarity()
 
@@ -142,70 +77,68 @@ def test_accumulator_detached():
     assert not cos._sum_of_cos_similarities.requires_grad
 
 
-@pytest.mark.distributed
-@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
-@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
-def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
-    device = idist.device()
-    _test_distrib_integration(device)
-    _test_distrib_accumulator_device(device)
 
+@pytest.mark.usefixtures("distributed")
+class TestDistributed:
+    def test_integration(self):
+        tol = 2e-5
+        n_iters = 100
+        batch_size = 10
+        n_dims = 100
 
-@pytest.mark.distributed
-@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
-def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
-    device = idist.device()
-    _test_distrib_integration(device)
-    _test_distrib_accumulator_device(device)
+        rank = idist.get_rank()
+        torch.manual_seed(12 + rank)
 
+        device = idist.device()
+        metric_devices = [torch.device("cpu")]
+        if device.type != "xla":
+            metric_devices.append(device)
 
-@pytest.mark.distributed
-@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
-@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
-def test_distrib_hvd(gloo_hvd_executor):
-    device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
-    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
+        for metric_device in metric_devices:
+            y_true = torch.randn((n_iters * batch_size, n_dims)).float().to(device)
+            y_preds = torch.normal(2.0, 3.0, size=(n_iters * batch_size, n_dims)).float().to(device)
-
-    gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
-    gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
+            engine = Engine(
+                lambda e, i: (
+                    y_preds[i * batch_size : (i + 1) * batch_size],
+                    y_true[i * batch_size : (i + 1) * batch_size],
+                )
+            )
 
+            m = CosineSimilarity(device=metric_device)
+            m.attach(engine, "cosine_similarity")
 
-@pytest.mark.multinode_distributed
-@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
-@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
-def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
-    device = idist.device()
-    _test_distrib_integration(device)
-    _test_distrib_accumulator_device(device)
+            data = list(range(n_iters))
+            engine.run(data=data, max_epochs=1)
 
+            y_preds = idist.all_gather(y_preds)
+            y_true = idist.all_gather(y_true)
 
-@pytest.mark.multinode_distributed
-@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
-@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
-def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
-    device = idist.device()
-    _test_distrib_integration(device)
-    _test_distrib_accumulator_device(device)
+            assert "cosine_similarity" in engine.state.metrics
+            res = engine.state.metrics["cosine_similarity"]
 
+            y_true_np = y_true.cpu().numpy()
+            y_preds_np = y_preds.cpu().numpy()
+            y_true_norm = np.clip(np.linalg.norm(y_true_np, axis=1, keepdims=True), 1e-8, None)
+            y_preds_norm = np.clip(np.linalg.norm(y_preds_np, axis=1, keepdims=True), 1e-8, None)
+            true_res = np.sum((y_true_np / y_true_norm) * (y_preds_np / y_preds_norm), axis=1)
+            true_res = np.mean(true_res)
 
-@pytest.mark.tpu
-@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
-@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
-def test_distrib_single_device_xla():
-    device = idist.device()
-    _test_distrib_integration(device, tol=1e-4)
-    _test_distrib_accumulator_device(device)
+            assert pytest.approx(res, rel=tol) == true_res
 
+    def test_accumulator_device(self):
+        device = idist.device()
+        metric_devices = [torch.device("cpu")]
+        if device.type != "xla":
+            metric_devices.append(device)
+        for metric_device in metric_devices:
+            cos = CosineSimilarity(device=metric_device)
 
-def _test_distrib_xla_nprocs(index):
-    device = idist.device()
-    _test_distrib_integration(device, tol=1e-4)
-    _test_distrib_accumulator_device(device)
+            for dev in (cos._device, cos._sum_of_cos_similarities.device):
+                assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
 
+            y_pred = torch.tensor([[2.0, 3.0], [-2.0, 1.0]]).float()
+            y = torch.ones(2, 2).float()
+            cos.update((y_pred, y))
 
-@pytest.mark.tpu
-@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
-@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
-def test_distrib_xla_nprocs(xmp_executor):
-    n = int(os.environ["NUM_TPU_WORKERS"])
-    xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
+            for dev in (cos._device, cos._sum_of_cos_similarities.device):
+                assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
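For context, a minimal usage sketch of the metric these tests exercise, assuming only the API visible in this diff (CosineSimilarity(eps=...) construction, update((y_pred, y)) with batches of row vectors, and a float from compute()); shapes and values are illustrative:

import torch
from ignite.metrics import CosineSimilarity

cos = CosineSimilarity(eps=1e-8)
cos.reset()
# One batch of 100 predictions and targets with 50 features each.
y_pred = torch.randn(100, 50)
y = torch.randn(100, 50)
cos.update((y_pred, y))
print(cos.compute())  # mean per-sample cosine similarity, as a Python float

The tests themselves can presumably be run like any other test in this file, e.g. pytest tests/ignite/metrics/test_cosine_similarity.py, with the distributed fixture supplying the process group for the TestDistributed class.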