From 44de8975abba456b6d2b86710a6641696b11ab5a Mon Sep 17 00:00:00 2001
From: 01-vyom
Date: Thu, 3 Jun 2021 01:22:50 +0530
Subject: [PATCH 1/4] ENH: Updated Loss metric to use required_output_keys

---
 ignite/metrics/loss.py | 45 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)

diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
index 3cf90a7519fb..b3fd56bed898 100644
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -30,9 +30,52 @@ class Loss(Metric):
         metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
         non-blocking. By default, CPU.
 
+    Attributes:
+        required_output_keys: tuple of keys required to be found in ``engine.state.output`` if the
+            latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
+            criterion function requires additional argument,
+            which can be passed using ``criterion_kwargs``.
+            See notes below for an example.
+
+    Note:
+
+        Let's implement a Loss metric that requires ``y_pred``, ``y`` and ``criterion_kwargs`` as input
+        for the ``criterion`` function. In the example below we show how to set up a standard metric like Accuracy
+        and the Loss metric using an ``evaluator`` created
+        with :meth:`~ignite.engine.create_supervised_evaluator` method.
+
+        .. code-block:: python
+
+            import torch
+            import torch.nn as nn
+
+            from ignite.metrics import Accuracy, Loss
+            from ignite.engine import create_supervised_evaluator
+
+            model = ...
+
+            criterion = nn.NLLLoss()
+
+            metrics = {
+                "Accuracy": Accuracy(),
+                "Loss": Loss(criterion)
+            }
+
+            # global criterion kwargs
+            criterion_kwargs = {...}
+
+            evaluator = create_supervised_evaluator(
+                model,
+                metrics=metrics,
+                output_transform=lambda x, y, y_pred: {
+                    "x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": criterion_kwargs}
+            )
+
+            res = evaluator.run(data)
+
     """
 
-    required_output_keys = None
+    required_output_keys = ("y_pred", "y", "criterion_kwargs")
 
     def __init__(
         self,
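
The docstring added above states the contract; the machinery that honours it lives in ``Metric.iteration_completed``. A simplified sketch of that dict-handling step (illustrative only — the helper name and signature below are not ignite's actual internals; the error messages are quoted from the tests added in the next patch):

    from collections.abc import Mapping

    def select_output(metric_name, output, required_output_keys):
        # When the transformed engine output is a mapping, pick out the
        # declared keys in order and pass them to ``update`` as a tuple.
        if isinstance(output, Mapping):
            if required_output_keys is None:
                raise TypeError(
                    f"Transformed engine output for {metric_name} metric should be a tuple/list"
                )
            if not all(k in output for k in required_output_keys):
                raise ValueError(
                    "When transformed engine's output is a mapping, "
                    f"it should contain {required_output_keys} keys"
                )
            output = tuple(output[k] for k in required_output_keys)
        return output

So with ``Loss.required_output_keys = ("y_pred", "y", "criterion_kwargs")``, a dict output such as ``{"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": {...}}`` is reduced to the tuple ``(y_pred, y, {...})`` before it reaches ``Loss.update``.
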
From 6492367d5a6a634ae004d3f73c7b25bafcb538c7 Mon Sep 17 00:00:00 2001
From: 01-vyom
Date: Thu, 3 Jun 2021 12:31:43 +0530
Subject: [PATCH 2/4] TST: Added unit and integration tests for Loss metric
 with required_output_keys support

---
 ignite/metrics/loss.py            |  10 +--
 tests/ignite/metrics/test_loss.py | 123 +++++++++++++++++++++++++++++-
 2 files changed, 127 insertions(+), 6 deletions(-)

diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
index b3fd56bed898..d0f367614f5d 100644
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -33,28 +33,28 @@ class Loss(Metric):
     Attributes:
         required_output_keys: tuple of keys required to be found in ``engine.state.output`` if the
             latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
-            criterion function requires additional argument,
-            which can be passed using ``criterion_kwargs``.
+            criterion function requires additional argument, which can be passed using ``criterion_kwargs``.
             See notes below for an example.
 
     Note:
 
         Let's implement a Loss metric that requires ``y_pred``, ``y`` and ``criterion_kwargs`` as input
         for the ``criterion`` function. In the example below we show how to set up a standard metric like Accuracy
-        and the Loss metric using an ``evaluator`` created
-        with :meth:`~ignite.engine.create_supervised_evaluator` method.
+        and the Loss metric using an ``evaluator`` created with
+        :meth:`~ignite.engine.create_supervised_evaluator` method.
 
         .. code-block:: python
 
             import torch
             import torch.nn as nn
+            from torch.nn.functional import nll_loss
 
             from ignite.metrics import Accuracy, Loss
             from ignite.engine import create_supervised_evaluator
 
             model = ...
 
-            criterion = nn.NLLLoss()
+            criterion = nll_loss()
 
             metrics = {
                 "Accuracy": Accuracy(),
diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py
index 23749b960072..e4f566927188 100644
--- a/tests/ignite/metrics/test_loss.py
+++ b/tests/ignite/metrics/test_loss.py
@@ -1,4 +1,5 @@
 import os
+from unittest.mock import MagicMock
 
 import pytest
 import torch
@@ -7,8 +8,83 @@
 from torch.nn.functional import nll_loss
 
 import ignite.distributed as idist
+from ignite.engine import State
 from ignite.exceptions import NotComputableError
-from ignite.metrics import Loss
+from ignite.metrics import Loss, Precision
+
+
+class DummyLoss1(Loss):
+    def __init__(self, loss_fn, true_output, output_transform=lambda x: x):
+        super(DummyLoss1, self).__init__(loss_fn, output_transform=output_transform)
+        print(true_output)
+        self.true_output = true_output
+
+    def reset(self):
+        pass
+
+    def compute(self):
+        pass
+
+    def update(self, output):
+
+        assert output == self.true_output
+
+
+def test_output_as_mapping_wrong_keys():
+    loss_metric = DummyLoss1(nll_loss, true_output=(0, 1, {}))
+    state = State(output=({"y1": 0, "y2": 1, "kwargs": {}}))
+    engine = MagicMock(state=state)
+
+    with pytest.raises(
+        ValueError,
+        match=r"When transformed engine's output is a mapping, "
+        r"it should contain \('y_pred', 'y', 'criterion_kwargs'\) keys",
+    ):
+        loss_metric.iteration_completed(engine)
+
+
+def test_output_as_mapping_keys_is_none():
+    class DummyLoss(Loss):
+        required_output_keys = None
+
+        def reset(self):
+            pass
+
+        def compute(self):
+            pass
+
+        def update(self, output):
+            pass
+
+    loss_metric = DummyLoss(nll_loss)
+    assert loss_metric.required_output_keys is None
+    state = State(output=({"y1": 0, "y2": 1, "kwargs": {}}))
+    engine = MagicMock(state=state)
+
+    with pytest.raises(TypeError, match=r"Transformed engine output for DummyLoss metric should be a tuple/list"):
+        loss_metric.iteration_completed(engine)
+
+
+def test_output_as_mapping_without_criterion_kwargs():
+    y_pred = torch.Tensor([[2.0], [-2.0]])
+    y = torch.zeros(2)
+    criterion_kwargs = {}
+
+    loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
+    state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {}}))
+    engine = MagicMock(state=state)
+    loss_metric.iteration_completed(engine)
+
+
+def test_output_as_mapping_with_criterion_kwargs():
+    y_pred = torch.Tensor([[2.0], [-2.0]])
+    y = torch.zeros(2)
+    criterion_kwargs = {"reduction": "sum"}
+
+    loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
+    state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {"reduction": "sum"}}))
+    engine = MagicMock(state=state)
+    loss_metric.iteration_completed(engine)
 
 
 def y_test_1(requires_grad=False, device=None):
@@ -252,3 +328,48 @@ def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
     device = idist.device()
     _test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
     _test_distrib_accumulator_device(device, y_test_1())
+
+
+def test_override_required_output_keys():
+    # https://github.com/pytorch/ignite/issues/1415
+    from ignite.engine import create_supervised_evaluator
+
+    counter = [0]
+
+    class DummyLoss2(Loss):
+        def __init__(self, *args, **kwargs):
+            super().__init__(*args, **kwargs)
+
+        def update(self, output):
+            y_pred, y, criterion_kwargs = output
+            assert y_pred.shape == (4, 3)
+            assert y.shape == (4,)
+            assert criterion_kwargs == c_kwargs
+            assert y.equal(data[counter[0]][1])
+            counter[0] += 1
+
+        def reset(self):
+            pass
+
+        def compute(self):
+            pass
+
+    model = nn.Linear(10, 3)
+
+    metrics = {"Precision": Precision(), "DummyLoss2": DummyLoss2(nll_loss)}
+
+    # global criterion kwargs
+    c_kwargs = {"reduction": "sum"}
+
+    evaluator = create_supervised_evaluator(
+        model,
+        metrics=metrics,
+        output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": c_kwargs},
+    )
+
+    data = [
+        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
+        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
+        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
+    ]
+    evaluator.run(data)
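
For reference while reading the tests above: the tuple built from the mapping is what ``Loss.update`` receives, and the third element is forwarded to the wrapped criterion as keyword arguments. A rough paraphrase of the update step (simplified sketch, not the verbatim implementation):

    def update(self, output):
        # A 2-tuple means no extra criterion arguments were supplied.
        if len(output) == 2:
            y_pred, y = output
            kwargs = {}
        else:
            y_pred, y, kwargs = output
        average_loss = self._loss_fn(y_pred, y, **kwargs).detach()

        if len(average_loss.shape) != 0:
            raise ValueError("loss_fn did not return the average loss.")

        # Accumulate a running sum weighted by batch size.
        n = self._batch_size(y)
        self._sum += average_loss.to(self._device) * n
        self._num_examples += n

This is the contract the ``DummyLoss1`` tests pin down: the ``criterion_kwargs`` dict must arrive in ``update`` untouched, whether it is empty or carries something like ``{"reduction": "sum"}``.
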
From 4b6fb88d20964639fe157670e3270876ae2ea5a6 Mon Sep 17 00:00:00 2001
From: 01-vyom
Date: Thu, 3 Jun 2021 18:12:00 +0530
Subject: [PATCH 3/4] TST: Updated test cases for Loss metric

---
 ignite/metrics/loss.py            |  2 +-
 tests/ignite/metrics/test_loss.py | 35 -------------------------------
 2 files changed, 1 insertion(+), 36 deletions(-)

diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
index d0f367614f5d..529bfc1ea98d 100644
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -54,7 +54,7 @@ class Loss(Metric):
 
             model = ...
 
-            criterion = nll_loss()
+            criterion = nll_loss
 
             metrics = {
                 "Accuracy": Accuracy(),
diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py
index e4f566927188..43adfc196381 100644
--- a/tests/ignite/metrics/test_loss.py
+++ b/tests/ignite/metrics/test_loss.py
@@ -30,41 +30,6 @@ def update(self, output):
         assert output == self.true_output
 
 
-def test_output_as_mapping_wrong_keys():
-    loss_metric = DummyLoss1(nll_loss, true_output=(0, 1, {}))
-    state = State(output=({"y1": 0, "y2": 1, "kwargs": {}}))
-    engine = MagicMock(state=state)
-
-    with pytest.raises(
-        ValueError,
-        match=r"When transformed engine's output is a mapping, "
-        r"it should contain \('y_pred', 'y', 'criterion_kwargs'\) keys",
-    ):
-        loss_metric.iteration_completed(engine)
-
-
-def test_output_as_mapping_keys_is_none():
-    class DummyLoss(Loss):
-        required_output_keys = None
-
-        def reset(self):
-            pass
-
-        def compute(self):
-            pass
-
-        def update(self, output):
-            pass
-
-    loss_metric = DummyLoss(nll_loss)
-    assert loss_metric.required_output_keys is None
-    state = State(output=({"y1": 0, "y2": 1, "kwargs": {}}))
-    engine = MagicMock(state=state)
-
-    with pytest.raises(TypeError, match=r"Transformed engine output for DummyLoss metric should be a tuple/list"):
-        loss_metric.iteration_completed(engine)
-
-
 def test_output_as_mapping_without_criterion_kwargs():
     y_pred = torch.Tensor([[2.0], [-2.0]])
     y = torch.zeros(2)
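
The ``criterion = nll_loss`` fix above is the important one: ``torch.nn.functional.nll_loss`` is a plain function, unlike the ``nn.NLLLoss`` module it replaced in the docstring example, so it must be passed uncalled — ``nll_loss()`` with no arguments simply raises a ``TypeError``. A quick standalone check (shapes are illustrative):

    import torch
    from torch.nn.functional import nll_loss

    y_pred = torch.log_softmax(torch.rand(4, 3), dim=1)  # log-probabilities
    y = torch.randint(0, 3, size=(4,))

    print(nll_loss(y_pred, y))                   # default: mean over the batch
    print(nll_loss(y_pred, y, reduction="sum"))  # the kind of kwarg criterion_kwargs carries
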
From 7350b1660cc254f6a4cae58ce66e8f954ee3b6eb Mon Sep 17 00:00:00 2001
From: 01-vyom
Date: Thu, 3 Jun 2021 20:46:13 +0530
Subject: [PATCH 4/4] DOC: Changed documentation for Loss metric

---
 ignite/metrics/loss.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
index 529bfc1ea98d..fbda23b867da 100644
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -33,12 +33,12 @@ class Loss(Metric):
     Attributes:
         required_output_keys: tuple of keys required to be found in ``engine.state.output`` if the
             latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
-            criterion function requires additional argument, which can be passed using ``criterion_kwargs``.
+            criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.
             See notes below for an example.
 
     Note:
 
-        Let's implement a Loss metric that requires ``y_pred``, ``y`` and ``criterion_kwargs`` as input
+        Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs`` as input
         for the ``criterion`` function. In the example below we show how to set up a standard metric like Accuracy
         and the Loss metric using an ``evaluator`` created with
        :meth:`~ignite.engine.create_supervised_evaluator` method.
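
Putting the series' final state together, an end-to-end sketch (model, data, and criterion kwargs mirror ``test_override_required_output_keys``; it assumes an ignite build that includes this change):

    import torch
    import torch.nn as nn
    from torch.nn.functional import nll_loss

    from ignite.engine import create_supervised_evaluator
    from ignite.metrics import Accuracy, Loss

    model = nn.Sequential(nn.Linear(10, 3), nn.LogSoftmax(dim=1))
    criterion_kwargs = {"reduction": "sum"}

    metrics = {"Accuracy": Accuracy(), "Loss": Loss(nll_loss)}

    # Each metric filters the dict by its own required_output_keys:
    # Accuracy takes ("y_pred", "y"); Loss takes ("y_pred", "y", "criterion_kwargs").
    evaluator = create_supervised_evaluator(
        model,
        metrics=metrics,
        output_transform=lambda x, y, y_pred: {
            "x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": criterion_kwargs
        },
    )

    data = [(torch.rand(4, 10), torch.randint(0, 3, size=(4,))) for _ in range(3)]
    state = evaluator.run(data)
    print(state.metrics)  # {"Accuracy": ..., "Loss": ...}
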