# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Sequence, Tuple, Union

import torch
from torch import Tensor, tensor

from torchmetrics.functional.text.ter import _ter_compute, _ter_update, _TercomTokenizer
from torchmetrics.metric import Metric
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
    __doctest_skip__ = ["TranslationEditRate.plot"]

class TranslationEditRate(Metric):
    """Calculate Translation edit rate (`TER`_) of machine translated text with one or more references.

    This implementation follows the one from `SacreBleu_ter`_, which is a near-exact reimplementation of the Tercom
    algorithm and produces identical results on all "sane" outputs.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~Sequence`): An iterable of hypothesis corpus
    - ``target`` (:class:`~Sequence`): An iterable of iterables of reference corpus

    As output of ``forward`` and ``compute`` the metric returns the following output:

    - ``ter`` (:class:`~torch.Tensor`): if ``return_sentence_level_score=True`` return a corpus-level translation
      edit rate with a list of sentence-level translation edit rates, else return a corpus-level translation edit rate

    Args:
        normalize: An indication whether general tokenization should be applied.
        no_punctuation: An indication whether punctuation should be removed from the sentences.
        lowercase: An indication whether to enable case-insensitivity.
        asian_support: An indication whether Asian characters should be processed.
        return_sentence_level_score: An indication whether a sentence-level TER should be returned.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.text import TranslationEditRate
        >>> preds = ['the cat is on the mat']
        >>> target = [['there is a cat on the mat', 'a cat is on the mat']]
        >>> ter = TranslationEditRate()
        >>> ter(preds, target)
        tensor(0.1538)

    """
    is_differentiable: bool = False
    higher_is_better: bool = False
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0

    total_num_edits: Tensor
    total_tgt_len: Tensor
    sentence_ter: Optional[List[Tensor]] = None
    def __init__(
        self,
        normalize: bool = False,
        no_punctuation: bool = False,
        lowercase: bool = True,
        asian_support: bool = False,
        return_sentence_level_score: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if not isinstance(normalize, bool):
            raise ValueError(f"Expected argument `normalize` to be of type boolean but got {normalize}.")
        if not isinstance(no_punctuation, bool):
            raise ValueError(f"Expected argument `no_punctuation` to be of type boolean but got {no_punctuation}.")
        if not isinstance(lowercase, bool):
            raise ValueError(f"Expected argument `lowercase` to be of type boolean but got {lowercase}.")
        if not isinstance(asian_support, bool):
            raise ValueError(f"Expected argument `asian_support` to be of type boolean but got {asian_support}.")
        self.tokenizer = _TercomTokenizer(normalize, no_punctuation, lowercase, asian_support)
        self.return_sentence_level_score = return_sentence_level_score

        self.add_state("total_num_edits", tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total_tgt_len", tensor(0.0), dist_reduce_fx="sum")
        if self.return_sentence_level_score:
            self.add_state("sentence_ter", [], dist_reduce_fx="cat")
    def update(self, preds: Union[str, Sequence[str]], target: Sequence[Union[str, Sequence[str]]]) -> None:
        """Update state with predictions and targets."""
        self.total_num_edits, self.total_tgt_len, self.sentence_ter = _ter_update(
            preds,
            target,
            self.tokenizer,
            self.total_num_edits,
            self.total_tgt_len,
            self.sentence_ter,
        )
    def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        """Calculate the translation edit rate (TER)."""
        ter = _ter_compute(self.total_num_edits, self.total_tgt_len)
        if self.sentence_ter is not None:
            return ter, torch.cat(self.sentence_ter)
        return ter
    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute`, or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, the plot will be added to that axis.

        Returns:
            Figure and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> # Example plotting a single value
            >>> from torchmetrics.text import TranslationEditRate
            >>> metric = TranslationEditRate()
            >>> preds = ['the cat is on the mat']
            >>> target = [['there is a cat on the mat', 'a cat is on the mat']]
            >>> metric.update(preds, target)
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> # Example plotting multiple values
            >>> from torchmetrics.text import TranslationEditRate
            >>> metric = TranslationEditRate()
            >>> preds = ['the cat is on the mat']
            >>> target = [['there is a cat on the mat', 'a cat is on the mat']]
            >>> values = []
            >>> for _ in range(10):
            ...     values.append(metric(preds, target))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)
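

# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal, hedged example of how the class above can be used. The sentences
# are made up purely for illustration, and the plotting step simply mirrors
# the docstring example.
if __name__ == "__main__":
    preds = ["the cat is on the mat", "there is a dog outside"]
    target = [["there is a cat on the mat", "a cat is on the mat"], ["a dog is outside"]]

    # Corpus-level TER plus one score per hypothesis sentence.
    ter = TranslationEditRate(return_sentence_level_score=True)
    corpus_ter, sentence_ter = ter(preds, target)
    print(f"corpus TER: {corpus_ter}")
    print(f"sentence TER: {sentence_ter}")

    # Plotting requires matplotlib; use a metric that returns a single scalar.
    if _MATPLOTLIB_AVAILABLE:
        scalar_metric = TranslationEditRate()
        scalar_metric.update(preds, target)
        fig, ax = scalar_metric.plot()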