5 changes: 4 additions & 1 deletion src/lighteval/metrics/utils/extractive_match_utils.py
@@ -398,7 +398,10 @@ def extract_expr(match: re.Match, timeout_seconds: int) -> tuple[str | sympy.Exp

         decimal = decimal.replace(",", ".")
         number_str = f"{integer}{decimal}"
-        number = Number(number_str)
+        try:
+            number = Number(number_str)
+        except Exception:
+            return None, number_str

     if is_percentage:
         number = convert_to_pct(number)
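A minimal sketch of the fallback this hunk introduces, assuming Number is sympy.Number and using a hypothetical safe_parse helper to stand in for the patched branch: a malformed numeral now yields (None, number_str) instead of raising out of extract_expr.

# Minimal sketch, not the actual lighteval code: safe_parse is a hypothetical
# helper mirroring the patched branch, and Number is assumed to be sympy.Number.
from sympy import Expr, Number


def safe_parse(number_str: str) -> tuple[Expr | None, str]:
    try:
        # Well-formed numerals parse as before.
        return Number(number_str), number_str
    except Exception:
        # Malformed numerals (e.g. a stray extra separator) no longer raise out
        # of the extractor; the raw string is kept so callers can still match on it.
        return None, number_str


safe_parse("1234.5")  # parses to a sympy Float
safe_parse("1.2.3")   # (None, "1.2.3") instead of an uncaught exception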
6 changes: 5 additions & 1 deletion src/lighteval/models/model_input.py
@@ -120,7 +120,11 @@ def to_vllm_dict(self) -> dict:

         # Task specific sampling params to set in model: n, best_of, use_beam_search
         # Generation specific params to set in model: logprobs, prompt_logprobs
-        return {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}
+        x = {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}
+        # vLLM's max_tokens defaults to 16; the pipeline expects max_tokens to be None if the user did not specify it.
+        if not x.get("max_tokens"):
+            x["max_tokens"] = None
+        return x

     def to_vllm_openai_dict(self) -> dict:
         """Selects relevant generation and sampling parameters for vllm and openai models.
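For the second hunk, a self-contained sketch of the max_tokens normalization. GenParams and the single mapping entry below are assumptions standing in for lighteval's GenerationParameters and sampling_params_to_vllm_naming, which are not shown in the diff.

# Illustrative sketch only; GenParams and the mapping entry are assumptions
# standing in for lighteval's GenerationParameters and sampling_params_to_vllm_naming.
from dataclasses import asdict, dataclass

sampling_params_to_vllm_naming = {"max_new_tokens": "max_tokens"}  # assumed subset


@dataclass
class GenParams:
    temperature: float | None = None
    max_new_tokens: int | None = None


def to_vllm_dict(params: GenParams) -> dict:
    x = {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(params).items() if v is not None}
    # vLLM would default max_tokens to 16; forcing it to None lets the pipeline
    # choose the generation budget when the user did not set one.
    if not x.get("max_tokens"):
        x["max_tokens"] = None
    return x


to_vllm_dict(GenParams(temperature=0.7))
# -> {"temperature": 0.7, "max_tokens": None}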