45 changes: 44 additions & 1 deletion ignite/metrics/loss.py
@@ -30,9 +30,52 @@ class Loss(Metric):
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.

Attributes:
required_output_keys: tuple of keys that must be present in ``engine.state.output`` when the
latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.
See the note below for an example.

Note:

Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs`` as input
to the ``criterion`` function. The example below shows how to set up a standard metric like Accuracy
together with the Loss metric on an ``evaluator`` created with the
:meth:`~ignite.engine.create_supervised_evaluator` method.

.. code-block:: python

import torch
import torch.nn as nn
from torch.nn.functional import nll_loss

from ignite.metrics import Accuracy, Loss
from ignite.engine import create_supervised_evaluator

model = ...

criterion = nll_loss

metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion)
}

# global criterion kwargs
criterion_kwargs = {...}

evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {
"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": criterion_kwargs}
)

res = evaluator.run(data)

"""

- required_output_keys = None
+ required_output_keys = ("y_pred", "y", "criterion_kwargs")

def __init__(
self,
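To make the new class attribute concrete: below is a minimal, hypothetical sketch (not part of this diff) of a subclass that also asks the engine for `x`. It relies on the base `Metric.iteration_completed` unpacking a mapping `engine.state.output` in `required_output_keys` order before calling `update`; the class name is made up for illustration.

```python
from ignite.metrics import Loss


class LossWithX(Loss):
    # Hypothetical subclass: also request "x", on top of the default
    # ("y_pred", "y", "criterion_kwargs") introduced in this diff.
    required_output_keys = ("x", "y_pred", "y", "criterion_kwargs")

    def update(self, output):
        # Values arrive in the same order as required_output_keys.
        x, y_pred, y, kwargs = output
        # x could be inspected or logged here; loss accumulation is
        # delegated to the parent class with the usual 3-tuple.
        super().update((y_pred, y, kwargs))
```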
88 changes: 87 additions & 1 deletion tests/ignite/metrics/test_loss.py
@@ -1,4 +1,5 @@
import os
from unittest.mock import MagicMock

import pytest
import torch
@@ -7,8 +8,48 @@
from torch.nn.functional import nll_loss

import ignite.distributed as idist
from ignite.engine import State
from ignite.exceptions import NotComputableError
- from ignite.metrics import Loss
+ from ignite.metrics import Loss, Precision


class DummyLoss1(Loss):
def __init__(self, loss_fn, true_output, output_transform=lambda x: x):
super(DummyLoss1, self).__init__(loss_fn, output_transform=output_transform)
print(true_output)
self.true_output = true_output

def reset(self):
pass

def compute(self):
pass

def update(self, output):

assert output == self.true_output


def test_output_as_mapping_without_criterion_kwargs():
y_pred = torch.Tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {}

loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)


def test_output_as_mapping_with_criterion_kwargs():
y_pred = torch.Tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {"reduction": "sum"}

loss_metric = DummyLoss1(nll_loss, true_output=(y_pred, y, criterion_kwargs))
state = State(output=({"y_pred": y_pred, "y": y, "criterion_kwargs": {"reduction": "sum"}}))
engine = MagicMock(state=state)
loss_metric.iteration_completed(engine)


def y_test_1(requires_grad=False, device=None):
@@ -252,3 +293,48 @@ def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
device = idist.device()
_test_distrib_compute_on_criterion(device, y_test_1(), y_test_2())
_test_distrib_accumulator_device(device, y_test_1())


def test_override_required_output_keys():
# https://github.com/pytorch/ignite/issues/1415
from ignite.engine import create_supervised_evaluator

counter = [0]

class DummyLoss2(Loss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

def update(self, output):
y_pred, y, criterion_kwargs = output
assert y_pred.shape == (4, 3)
assert y.shape == (4,)
assert criterion_kwargs == c_kwargs
assert y.equal(data[counter[0]][1])
counter[0] += 1

def reset(self):
pass

def compute(self):
pass

model = nn.Linear(10, 3)

metrics = {"Precision": Precision(), "DummyLoss2": DummyLoss2(nll_loss)}

# global criterion kwargs
c_kwargs = {"reduction": "sum"}

evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": c_kwargs},
)

data = [
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
(torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
]
evaluator.run(data)
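For reference, a rough standalone sketch of what the `criterion_kwargs` entry does at the metric level, outside an engine. It assumes the usual `Loss.update` behaviour of forwarding the kwargs to the criterion as `loss_fn(y_pred, y, **kwargs)`; the tensors are made-up dummy data.

```python
import torch
from torch.nn.functional import nll_loss

from ignite.metrics import Loss

y_pred = torch.log_softmax(torch.rand(4, 3), dim=1)  # log-probabilities, as nll_loss expects
y = torch.randint(0, 3, size=(4,))

# 2-tuple update: nll_loss runs with its default (mean) reduction.
loss = Loss(nll_loss)
loss.update((y_pred, y))
print(loss.compute())

# 3-tuple update: the dict is forwarded to nll_loss as keyword arguments,
# here switching the reduction to "sum".
loss = Loss(nll_loss)
loss.update((y_pred, y, {"reduction": "sum"}))
print(loss.compute())
```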