5 changes: 2 additions & 3 deletions belar/metrics/__init__.py
@@ -1,5 +1,4 @@
 from belar.metrics.base import Evaluation, Metric
-from belar.metrics.similarity import *
-from belar.metrics.simple import *
-from belar.metrics.similarity import SBERTScore
 from belar.metrics.factual import EntailmentScore
+from belar.metrics.similarity import SBERTScore
+from belar.metrics.simple import *
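
For orientation, a quick sketch (not part of the diff) of the import surface this file now exposes; it assumes the belar package from this branch is installed and relies on simple.py's `__all__` and the `ROUGE.name` property shown further down.

```python
# Illustrative only -- assumes the belar package from this branch is importable.
# These names are re-exported by belar/metrics/__init__.py after this change.
from belar.metrics import EntailmentScore, Evaluation, Metric, SBERTScore
from belar.metrics import Rouge1  # pulled in via `from belar.metrics.simple import *`

print(Rouge1.name)  # "rouge1", via the name property added in simple.py
```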
11 changes: 9 additions & 2 deletions belar/metrics/factual.py
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import typing as t
 from dataclasses import dataclass
 
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
 from belar.metrics import Metric
 from belar.utils import device_check
 
@@ -15,6 +16,7 @@ class EntailmentScore(Metric):
     """
 
     model_name: str = "typeform/distilbert-base-uncased-mnli"
+    max_length: int = 512
     batch_size: int = 4
     device: t.Literal["cpu", "cuda"] = "cpu"
 
@@ -67,7 +69,12 @@ def score(
         """
 
         encodings = self.tokenizer(
-            ground_truth, generated_text, truncation=True, return_tensors="pt"
+            ground_truth,
+            generated_text,
+            truncation=True,
+            return_tensors="pt",
+            max_length=self.max_length,
+            padding="max_length",
         )
 
         score = self.batch_infer(encodings)
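
A minimal sketch (not part of the diff) of what the new tokenizer arguments do: each ground-truth/generated pair is truncated and padded to a fixed max_length, so every batch has a uniform shape and long inputs no longer exceed the model's 512-token limit. It uses transformers directly with the default checkpoint named above; the sample strings are made up.

```python
# Illustrative sketch of the max_length / padding arguments added to
# EntailmentScore.score -- not part of the PR itself.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("typeform/distilbert-base-uncased-mnli")

ground_truth = ["The cat sat on the mat."]
generated_text = ["A cat is sitting on a mat."]

encodings = tokenizer(
    ground_truth,
    generated_text,
    truncation=True,
    return_tensors="pt",
    max_length=512,
    padding="max_length",
)

# Every premise/hypothesis pair is padded or truncated to exactly 512 tokens,
# so the batch tensor has a predictable shape.
print(encodings["input_ids"].shape)  # torch.Size([1, 512])
```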
15 changes: 11 additions & 4 deletions belar/metrics/simple.py
@@ -1,8 +1,9 @@
 from __future__ import annotations
 
-from Levenshtein import distance, ratio
 import typing as t
-from dataclasses import dataclass
+from dataclasses import dataclass, field
+
+from Levenshtein import distance, ratio
 from nltk.tokenize import word_tokenize
 from nltk.translate.bleu_score import sentence_bleu
 from rouge_score import rouge_scorer
@@ -14,7 +15,7 @@
 
 @dataclass
 class BLEU(Metric):
-    weights: t.List[float] = [0.25, 0.25, 0.25, 0.25]
+    weights: list[float] = field(default_factory=lambda: [0.25, 0.25, 0.25, 0.25])
     smoothing_function = None
 
     @property
@@ -49,9 +50,11 @@ def __post_init__(self):
             [self.type], use_stemmer=self.use_stemmer
         )
 
+    @property
     def name(self):
         return self.type
 
+    @property
     def is_batchable(self):
         return False
 
@@ -65,6 +68,7 @@ def score(
         return scores
 
 
+@dataclass
 class EditScore(Metric):
     measure: t.Literal["distance", "ratio"] = "ratio"
 
@@ -90,5 +94,8 @@ def score(self, ground_truth: t.List[str], generated_text: t.List[str]):
 Rouge1 = ROUGE("rouge1")
 Rouge2 = ROUGE("rouge2")
 RougeL = ROUGE("rougeL")
+BLUE = BLEU()
+EditDistance = EditScore("distance")
+EditRatio = EditScore("ratio")
 
-__all__ = ["Rouge1", "Rouge2", "RougeL"]
+__all__ = ["Rouge1", "Rouge2", "RougeL", "BLEU", "EditDistance", "EditRatio"]