question_answering.py
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Lint as: python3
from datasets import Dataset
try:
    # Optional dependency guard: the transformers import assumed here may differ from the original.
    from transformers import Pipeline, pipeline  # noqa: F401

    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from ..utils.logging import get_logger
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumn
if TYPE_CHECKING:
    from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
logger = get_logger(__name__)
TASK_DOCUMENTATION = r"""
    Examples:
    ```python
    >>> from evaluate import evaluator
    >>> from datasets import load_dataset
    >>> task_evaluator = evaluator("question-answering")
    >>> data = load_dataset("squad", split="validation[:2]")
    >>> results = task_evaluator.compute(
    >>>     model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad",
    >>>     data=data,
    >>>     metric="squad",
    >>> )
    ```

    <Tip>

    Datasets where the answer may be missing from the context are supported, for example the SQuAD v2 dataset.
    In this case, it is safer to pass `squad_v2_format=True` to the compute() call.

    </Tip>

    ```python
    >>> from evaluate import evaluator
    >>> from datasets import load_dataset
    >>> task_evaluator = evaluator("question-answering")
    >>> data = load_dataset("squad_v2", split="validation[:2]")
    >>> results = task_evaluator.compute(
    >>>     model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2",
    >>>     data=data,
    >>>     metric="squad_v2",
    >>>     squad_v2_format=True,
    >>> )
    ```
"""

class QuestionAnsweringEvaluator(Evaluator):
    """
    Question answering evaluator. This evaluator handles
    [**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
    where the answer to the question is extracted from a context.

    This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
    `question-answering`.

    Methods in this class assume a data format compatible with the
    [`~transformers.QuestionAnsweringPipeline`].
    """

    PIPELINE_KWARGS = {}

    def __init__(self, task="question-answering", default_metric_name=None):
        super().__init__(task, default_metric_name=default_metric_name)

    def prepare_data(
        self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
    ):
        """Prepare data."""
        if data is None:
            raise ValueError(
                "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
            )
        self.check_required_columns(
            data,
            {
                "question_column": question_column,
                "context_column": context_column,
                "id_column": id_column,
                "label_column": label_column,
            },
        )
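        # Build the metric references: each entry pairs an example id with its gold
        # answers, which is the reference format the squad / squad_v2 metrics expect.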
        metric_inputs = dict()
        metric_inputs["references"] = [
            {"id": element[id_column], "answers": element[label_column]} for element in data
        ]
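        # The pipeline inputs wrap the question and context columns in DatasetColumn,
        # a lazy, list-like view over the dataset, so the pipeline can iterate them directly.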
        return metric_inputs, {
            "question": DatasetColumn(data, question_column),
            "context": DatasetColumn(data, context_column),
        }

    def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
        """
        Check whether the provided dataset follows the SQuAD v2 schema, i.e. whether it may contain samples
        where the answer is not present in the context. For such samples, the answer text list is `[]`.
        """
        original_num_rows = data.num_rows
        nonempty_num_rows = data.filter(
            lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
        ).num_rows
        return original_num_rows > nonempty_num_rows

    def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
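        # Convert raw pipeline outputs into the prediction format expected by the
        # squad / squad_v2 metrics; for v2 data, the pipeline score is reused as the
        # no-answer probability.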
        result = []
        for i in range(len(predictions)):
            pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
            if squad_v2_format:
                pred["no_answer_probability"] = predictions[i]["score"]
            result.append(pred)
        return {"predictions": result}

    @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
    @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
    def compute(
        self,
        model_or_pipeline: Union[
            str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"  # noqa: F821
        ] = None,
        data: Union[str, Dataset] = None,
        subset: Optional[str] = None,
        split: Optional[str] = None,
        metric: Union[str, EvaluationModule] = None,
        tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None,  # noqa: F821
        strategy: Literal["simple", "bootstrap"] = "simple",
        confidence_level: float = 0.95,
        n_resamples: int = 9999,
        device: int = None,
        random_state: Optional[int] = None,
        question_column: str = "question",
        context_column: str = "context",
        id_column: str = "id",
        label_column: str = "answers",
        squad_v2_format: Optional[bool] = None,
    ) -> Tuple[Dict[str, float], Any]:
"""
question_column (`str`, defaults to `"question"`):
The name of the column containing the question in the dataset specified by `data`.
context_column (`str`, defaults to `"context"`):
The name of the column containing the context in the dataset specified by `data`.
id_column (`str`, defaults to `"id"`):
The name of the column containing the identification field of the question and answer pair in the
dataset specified by `data`.
label_column (`str`, defaults to `"answers"`):
The name of the column containing the answers in the dataset specified by `data`.
squad_v2_format (`bool`, *optional*, defaults to `None`):
Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
has questions where the answer is not in the context, more specifically when are answers as
`{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
"""
        result = {}

        self.check_for_mismatch_in_device_setup(device, model_or_pipeline)

        data = self.load_data(data=data, subset=subset, split=split)
        metric_inputs, pipe_inputs = self.prepare_data(
            data=data,
            question_column=question_column,
            context_column=context_column,
            id_column=id_column,
            label_column=label_column,
        )
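        # If the caller did not say which format the data follows, infer it from the
        # gold answers (empty answer lists imply the SQuAD v2 schema).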
        if squad_v2_format is None:
            squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
            logger.warning(
                f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
            )

        pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)

        metric = self.prepare_metric(metric)
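        # Warn when the chosen metric does not match the detected data format.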
        if squad_v2_format and metric.name == "squad":
            logger.warning(
                "The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
            )
        if not squad_v2_format and metric.name == "squad_v2":
            logger.warning(
                "The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
            )
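        # Only let the pipeline return an empty ("impossible") answer when the data
        # can actually contain unanswerable questions.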
        if squad_v2_format:
            self.PIPELINE_KWARGS["handle_impossible_answer"] = True
        else:
            self.PIPELINE_KWARGS["handle_impossible_answer"] = False

        # Compute predictions
        predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
        predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
        metric_inputs.update(predictions)
        # Compute metrics from references and predictions
        metric_results = self.compute_metric(
            metric=metric,
            metric_inputs=metric_inputs,
            strategy=strategy,
            confidence_level=confidence_level,
            n_resamples=n_resamples,
            random_state=random_state,
        )

        result.update(metric_results)
        result.update(perf_results)

        return result