Commit

Readme update (#1197)
* readme update

* readme update

* readme update

* readme update

* docs update

* docs update
Scitator committed Apr 25, 2021
1 parent 9a9901b commit 3bf9c1b
Showing 11 changed files with 585 additions and 0 deletions.
73 changes: 73 additions & 0 deletions catalyst/metrics/_accuracy.py
@@ -24,6 +24,45 @@ class AccuracyMetric(ICallbackBatchMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
outputs = torch.tensor([
[0.2, 0.5, 0.0, 0.3],
[0.9, 0.1, 0.0, 0.0],
[0.0, 0.1, 0.6, 0.3],
[0.0, 0.8, 0.2, 0.0],
])
targets = torch.tensor([3, 0, 2, 2])
metric = metrics.AccuracyMetric(topk_args=(1, 3))
metric.reset()
metric.update(outputs, targets)
metric.compute()
# (
# (0.5, 1.0), # top1, top3 mean
# (0.0, 0.0), # top1, top3 std
# )
metric.compute_key_value()
# {
# 'accuracy': 0.5,
# 'accuracy/std': 0.0,
# 'accuracy01': 0.5,
# 'accuracy01/std': 0.0,
# 'accuracy03': 1.0,
# 'accuracy03/std': 0.0,
# }
metric.reset()
metric(outputs, targets)
# (
# (0.5, 1.0), # top1, top3 mean
# (0.0, 0.0), # top1, top3 std
# )
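For intuition, here is a minimal plain-PyTorch sketch of the top-k rule these numbers come from: a sample counts as correct when its target index appears among the k highest-scoring classes. This is illustrative only, not AccuracyMetric's internal implementation.
.. code-block:: python
import torch
outputs = torch.tensor([
    [0.2, 0.5, 0.0, 0.3],
    [0.9, 0.1, 0.0, 0.0],
    [0.0, 0.1, 0.6, 0.3],
    [0.0, 0.8, 0.2, 0.0],
])
targets = torch.tensor([3, 0, 2, 2])
for k in (1, 3):
    # indices of the k highest scores per sample
    topk_preds = outputs.topk(k, dim=1).indices
    # correct if the target appears among the top-k predictions
    correct = (topk_preds == targets.unsqueeze(1)).any(dim=1)
    print(f"top{k}:", correct.float().mean().item())  # top1: 0.5, top3: 1.0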
.. code-block:: python
import torch
@@ -183,6 +222,40 @@ class MultilabelAccuracyMetric(AdditiveValueMetric, ICallbackBatchMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
outputs = torch.tensor([
[0.1, 0.9, 0.0, 0.8],
[0.96, 0.01, 0.85, 0.2],
[0.98, 0.4, 0.2, 0.1],
[0.1, 0.89, 0.2, 0.0],
])
targets = torch.tensor([
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
])
metric = metrics.MultilabelAccuracyMetric(threshold=0.6)
metric.reset()
metric.update(outputs, targets)
metric.compute()
# (0.75, 0.0) # mean, std
metric.compute_key_value()
# {
# 'accuracy': 0.75,
# 'accuracy/std': 0.0,
# }
metric.reset()
metric(outputs, targets)
# (0.75, 0.0) # mean, std
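For reference, a minimal sketch of the thresholded rule behind the 0.75: binarize the scores and measure elementwise agreement with the targets. Whether the boundary itself counts as positive is not exercised by this data; >= is assumed below.
.. code-block:: python
import torch
outputs = torch.tensor([
    [0.1, 0.9, 0.0, 0.8],
    [0.96, 0.01, 0.85, 0.2],
    [0.98, 0.4, 0.2, 0.1],
    [0.1, 0.89, 0.2, 0.0],
])
targets = torch.tensor([
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [0, 1, 0, 0],
    [0, 1, 0, 0],
])
preds = (outputs >= 0.6).long()  # binarize at the threshold (>= assumed)
print((preds == targets).float().mean().item())  # 0.75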
.. code-block:: python
import torch
15 changes: 15 additions & 0 deletions catalyst/metrics/_additive.py
@@ -13,6 +13,21 @@ class AdditiveValueMetric(IMetric):
Examples:
.. code-block:: python
import numpy as np
from catalyst import metrics
values = [1, 2, 3, 4, 5]
num_samples_list = [1, 2, 3, 4, 5]
true_values = [1, 1.666667, 2.333333, 3, 3.666667]
metric = metrics.AdditiveValueMetric()
for value, num_samples, true_value in zip(values, num_samples_list, true_values):
metric.update(value=value, num_samples=num_samples)
mean, _ = metric.compute()
assert np.isclose(mean, true_value)
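The true_values above are sample-weighted running means: each update contributes value * num_samples to the accumulated total. A minimal sketch of that accumulation (the metric additionally tracks a running std, omitted here):
.. code-block:: python
total, count = 0.0, 0
for value, num_samples in zip([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]):
    total += value * num_samples
    count += num_samples
    print(total / count)  # 1.0, 1.666..., 2.333..., 3.0, 3.666...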
.. code-block:: python
import os
45 changes: 45 additions & 0 deletions catalyst/metrics/_auc.py
@@ -22,6 +22,51 @@ class AUCMetric(ICallbackLoaderMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
scores = torch.tensor([
[0.9, 0.1],
[0.1, 0.9],
])
targets = torch.tensor([
[1, 0],
[0, 1],
])
metric = metrics.AUCMetric()
# for efficient statistics storage
metric.reset(num_batches=1, num_samples=len(scores))
metric.update(scores, targets)
metric.compute()
# (
# tensor([1., 1.]), # per class
# 1.0, # micro
# 1.0, # macro
# 1.0 # weighted
# )
metric.compute_key_value()
# {
# 'auc': 1.0,
# 'auc/_micro': 1.0,
# 'auc/_macro': 1.0,
# 'auc/_weighted': 1.0,
# 'auc/class_00': 1.0,
# 'auc/class_01': 1.0,
# }
metric.reset(num_batches=1, num_samples=len(scores))
metric(scores, targets)
# (
# tensor([1., 1.]), # per class
# 1.0, # micro
# 1.0, # macro
# 1.0 # weighted
# )
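As a cross-check, the per-class values can be reproduced with scikit-learn's roc_auc_score (assuming scikit-learn is available; this is not how AUCMetric computes the score internally):
.. code-block:: python
import torch
from sklearn.metrics import roc_auc_score
scores = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
targets = torch.tensor([[1, 0], [0, 1]])
for c in range(scores.shape[1]):
    # one ROC-AUC per class column; both are 1.0 because the positive
    # sample always outscores the negative one
    print(roc_auc_score(targets[:, c].numpy(), scores[:, c].numpy()))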
.. code-block:: python
import torch
173 changes: 173 additions & 0 deletions catalyst/metrics/_classification.py
@@ -483,6 +483,88 @@ class MulticlassPrecisionRecallF1SupportMetric(PrecisionRecallF1SupportMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
num_classes = 4
zero_division = 0
outputs_list = [torch.tensor([0, 1, 2]), torch.tensor([2, 3]), torch.tensor([0, 1, 3])]
targets_list = [torch.tensor([0, 1, 1]), torch.tensor([2, 3]), torch.tensor([0, 1, 2])]
metric = metrics.MulticlassPrecisionRecallF1SupportMetric(
num_classes=num_classes, zero_division=zero_division
)
metric.reset()
for outputs, targets in zip(outputs_list, targets_list):
metric.update(outputs=outputs, targets=targets)
metric.compute()
# (
# # per class precision, recall, f1, support
# (
# array([1. , 1. , 0.5, 0.5]),
# array([1. , 0.66666667, 0.5 , 1. ]),
# array([0.999995 , 0.7999952 , 0.499995 , 0.66666222]),
# array([2., 3., 2., 1.]),
# ),
# # micro precision, recall, f1, support
# (0.75, 0.75, 0.7499950000333331, None),
# # macro precision, recall, f1, support
# (0.75, 0.7916666666666667, 0.7416618555889127, None),
# # weighted precision, recall, f1, support
# (0.8125, 0.75, 0.7583284778110313, None)
# )
metric.compute_key_value()
# {
# 'f1/_macro': 0.7416618555889127,
# 'f1/_micro': 0.7499950000333331,
# 'f1/_weighted': 0.7583284778110313,
# 'f1/class_00': 0.9999950000249999,
# 'f1/class_01': 0.7999952000287999,
# 'f1/class_02': 0.49999500004999947,
# 'f1/class_03': 0.6666622222518517,
# 'precision/_macro': 0.75,
# 'precision/_micro': 0.75,
# 'precision/_weighted': 0.8125,
# 'precision/class_00': 1.0,
# 'precision/class_01': 1.0,
# 'precision/class_02': 0.5,
# 'precision/class_03': 0.5,
# 'recall/_macro': 0.7916666666666667,
# 'recall/_micro': 0.75,
# 'recall/_weighted': 0.75,
# 'recall/class_00': 1.0,
# 'recall/class_01': 0.6666666666666667,
# 'recall/class_02': 0.5,
# 'recall/class_03': 1.0,
# 'support/class_00': 2.0,
# 'support/class_01': 3.0,
# 'support/class_02': 2.0,
# 'support/class_03': 1.0
# }
metric.reset()
metric(outputs_list[0], targets_list[0])
# (
# # per class precision, recall, f1, support
# (
# array([1., 1., 0., 0.]),
# array([1. , 0.5, 0. , 0. ]),
# array([0.999995 , 0.66666222, 0. , 0. ]),
# array([1., 2., 0., 0.]),
# ),
# # micro precision, recall, f1, support
# (0.6666666666666667, 0.6666666666666667, 0.6666616667041664, None),
# # macro precision, recall, f1, support
# (0.5, 0.375, 0.41666430556921286, None),
# # weighted precision, recall, f1, support
# (1.0, 0.6666666666666666, 0.7777731481762343, None)
# )
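As a cross-check, the same statistics follow from scikit-learn's precision_recall_fscore_support on the concatenated batches (assuming scikit-learn is available). The f1 values above deviate from the exact ratios around the sixth decimal place, which looks like a numerical-stability epsilon in the f1 denominator (an assumption, not verified here).
.. code-block:: python
import torch
from sklearn.metrics import precision_recall_fscore_support
outputs = torch.cat([torch.tensor([0, 1, 2]), torch.tensor([2, 3]), torch.tensor([0, 1, 3])])
targets = torch.cat([torch.tensor([0, 1, 1]), torch.tensor([2, 3]), torch.tensor([0, 1, 2])])
for average in (None, "micro", "macro", "weighted"):
    # average=None yields the per-class arrays; the string modes yield
    # aggregated scalars (support is None for aggregated modes)
    print(average, precision_recall_fscore_support(
        targets.numpy(), outputs.numpy(), average=average, zero_division=0
    ))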
.. code-block:: python
import torch
@@ -572,6 +654,97 @@ class MultilabelPrecisionRecallF1SupportMetric(PrecisionRecallF1SupportMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
num_classes = 4
zero_division = 0
outputs_list = [
torch.tensor([[0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 1, 0]]),
torch.tensor([[0, 1, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1]]),
torch.tensor([[0, 1, 0, 0], [0, 1, 0, 1]]),
]
targets_list = [
torch.tensor([[0, 1, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]]),
torch.tensor([[0, 1, 0, 0], [0, 0, 1, 1], [1, 0, 1, 0]]),
torch.tensor([[0, 1, 0, 0], [0, 0, 1, 0]]),
]
metric = metrics.MultilabelPrecisionRecallF1SupportMetric(
num_classes=num_classes, zero_division=zero_division
)
metric.reset()
for outputs, targets in zip(outputs_list, targets_list):
metric.update(outputs=outputs, targets=targets)
metric.compute()
# (
# # per class precision, recall, f1, support
# (
# array([0. , 0.66666667, 0. , 0.4 ]),
# array([0. , 1. , 0. , 0.66666667]),
# array([0. , 0.7999952 , 0. , 0.49999531]),
# array([1., 4., 4., 3.])
# ),
# # micro precision, recall, f1, support
# (0.46153846153846156, 0.5, 0.4799950080519163, None),
# # macro precision, recall, f1, support
# (0.2666666666666667, 0.4166666666666667, 0.32499762814318617, None),
# # weighted precision, recall, f1, support
# (0.32222222222222224, 0.5, 0.39166389481225283, None)
# )
metric.compute_key_value()
# {
# 'f1/_macro': 0.32499762814318617,
# 'f1/_micro': 0.4799950080519163,
# 'f1/_weighted': 0.39166389481225283,
# 'f1/class_00': 0.0,
# 'f1/class_01': 0.7999952000287999,
# 'f1/class_02': 0.0,
# 'f1/class_03': 0.49999531254394486,
# 'precision/_macro': 0.2666666666666667,
# 'precision/_micro': 0.46153846153846156,
# 'precision/_weighted': 0.32222222222222224,
# 'precision/class_00': 0.0,
# 'precision/class_01': 0.6666666666666667,
# 'precision/class_02': 0.0,
# 'precision/class_03': 0.4,
# 'recall/_macro': 0.4166666666666667,
# 'recall/_micro': 0.5,
# 'recall/_weighted': 0.5,
# 'recall/class_00': 0.0,
# 'recall/class_01': 1.0,
# 'recall/class_02': 0.0,
# 'recall/class_03': 0.6666666666666667,
# 'support/class_00': 1.0,
# 'support/class_01': 4.0,
# 'support/class_02': 4.0,
# 'support/class_03': 3.0
# }
metric.reset()
metric(outputs_list[0], targets_list[0])
# (
# # per class precision, recall, f1, support
# (
# array([0., 1., 0., 1.]),
# array([0. , 1. , 0. , 0.5]),
# array([0. , 0.999995 , 0. , 0.66666222]),
# array([0., 2., 1., 2.])
# ),
# # micro precision, recall, f1, support
# (0.75, 0.6, 0.6666617284316411, None),
# # macro precision, recall, f1, support
# (0.5, 0.375, 0.41666430556921286, None),
# # weighted precision, recall, f1, support
# (0.8, 0.6000000000000001, 0.6666628889107407, None)
# )
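The same scikit-learn cross-check works for the multilabel case (again assuming scikit-learn is available): the binary indicator matrices are passed to precision_recall_fscore_support directly.
.. code-block:: python
import torch
from sklearn.metrics import precision_recall_fscore_support
outputs = torch.cat([
    torch.tensor([[0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 1, 0]]),
    torch.tensor([[0, 1, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1]]),
    torch.tensor([[0, 1, 0, 0], [0, 1, 0, 1]]),
])
targets = torch.cat([
    torch.tensor([[0, 1, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]]),
    torch.tensor([[0, 1, 0, 0], [0, 0, 1, 1], [1, 0, 1, 0]]),
    torch.tensor([[0, 1, 0, 0], [0, 0, 1, 0]]),
])
for average in (None, "micro", "macro", "weighted"):
    # multilabel indicator input: one column per class
    print(average, precision_recall_fscore_support(
        targets.numpy(), outputs.numpy(), average=average, zero_division=0
    ))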
.. code-block:: python
import torch
37 changes: 37 additions & 0 deletions catalyst/metrics/_cmc_score.py
@@ -21,6 +21,43 @@ class CMCMetric(AccumulationMetric):
Examples:
.. code-block:: python
import torch
from catalyst import metrics
batch = {
"embeddings": torch.tensor(
[
[1, 1, 0, 0],
[1, 0, 1, 1],
[0, 1, 1, 1],
[0, 0, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1],
[0, 1, 1, 0],
]
).float(),
"labels": torch.tensor([0, 0, 1, 1, 0, 1, 1]),
"is_query": torch.tensor([1, 1, 1, 1, 0, 0, 0]).bool(),
}
topk = (1, 3)
metric = metrics.CMCMetric(
embeddings_key="embeddings",
labels_key="labels",
is_query_key="is_query",
topk_args=topk,
)
metric.reset(num_batches=1, num_samples=len(batch["embeddings"]))
metric.update(**batch)
metric.compute()
# [0.75, 1.0] # CMC@01, CMC@03
metric.compute_key_value()
# {'cmc01': 0.75, 'cmc03': 1.0}
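For intuition, here is a minimal CMC@k sketch over the same data, assuming gallery items are ranked by Euclidean distance to each query (an illustrative assumption, not necessarily the library's internal criterion; it reproduces CMC@1 = 0.75 and CMC@3 = 1.0 here):
.. code-block:: python
import torch
embeddings = torch.tensor([
    [1, 1, 0, 0], [1, 0, 1, 1], [0, 1, 1, 1], [0, 0, 1, 1],  # queries
    [1, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0],  # gallery
]).float()
labels = torch.tensor([0, 0, 1, 1, 0, 1, 1])
is_query = torch.tensor([1, 1, 1, 1, 0, 0, 0]).bool()
query, gallery = embeddings[is_query], embeddings[~is_query]
q_labels, g_labels = labels[is_query], labels[~is_query]
dists = torch.cdist(query, gallery)  # (num_queries, num_gallery)
ranking = dists.argsort(dim=1)  # gallery indices, nearest first
for k in (1, 3):
    # hit if any of the k nearest gallery items shares the query's label
    hits = (g_labels[ranking[:, :k]] == q_labels.unsqueeze(1)).any(dim=1)
    print(f"cmc@{k}:", hits.float().mean().item())  # cmc@1: 0.75, cmc@3: 1.0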
.. code-block:: python
import os