Commit d81bafd

Authored by hynky1999 (Hynek Kydlicek) and a co-author
VLLM + Math-Verify fixes (#603)
* nits

* 🥰 pretty 🥰

---------

Co-authored-by: Hynek Kydlicek <[email protected]>
1 parent f2ddc52 commit d81bafd

File tree

2 files changed: +9 −2 lines changed

src/lighteval/metrics/utils/extractive_match_utils.py

Lines changed: 4 additions & 1 deletion
@@ -398,7 +398,10 @@ def extract_expr(match: re.Match, timeout_seconds: int) -> tuple[str | sympy.Exp
 
         decimal = decimal.replace(",", ".")
         number_str = f"{integer}{decimal}"
-        number = Number(number_str)
+        try:
+            number = Number(number_str)
+        except Exception:
+            return None, number_str
 
         if is_percentage:
             number = convert_to_pct(number)
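For context, a minimal, runnable sketch of the failure mode this try/except guards against. This is not the actual extract_expr function; the parse_number helper and the sample inputs are illustrative stand-ins. sympy's Number raises when handed a string it cannot parse as a number, so the bare call could crash the whole extraction.

# Hedged sketch, assuming only that sympy is installed.
from sympy import Number


def parse_number(number_str: str):
    """Return (parsed_number, raw_string); parsed_number is None on failure."""
    try:
        return Number(number_str), number_str
    except Exception:
        # Malformed strings such as "1.2.3" make sympy raise,
        # so fall back to returning only the raw string.
        return None, number_str


print(parse_number("3.14"))   # (3.14..., '3.14')
print(parse_number("1.2.3"))  # (None, '1.2.3')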

src/lighteval/models/model_input.py

Lines changed: 5 additions & 1 deletion
@@ -120,7 +120,11 @@ def to_vllm_dict(self) -> dict:
 
         # Task specific sampling params to set in model: n, best_of, use_beam_search
         # Generation specific params to set in model: logprobs, prompt_logprobs
-        return {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}
+        x = {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}
+        # vLLM's max_tokens defaults to 16, but the pipeline expects max_tokens to be None if the user didn't set it
+        if not x.get("max_tokens"):
+            x["max_tokens"] = None
+        return x
 
     def to_vllm_openai_dict(self) -> dict:
         """Selects relevant generation and sampling parameters for vllm and openai models.
