Skip to content

Commit 3b89734

Browse files
authored
Fixed issue with o1 in litellm. (#493)
1 parent 90d44c1 commit 3b89734

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

src/lighteval/models/litellm_model.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ def __init__(self, config, env_config) -> None:
9191
self._tokenizer = encode
9292
self.pairwise_tokenization = False
9393
litellm.drop_params = True
94-
litellm.verbose = True
94+
litellm.set_verbose = False
9595

9696
def _prepare_stop_sequence(self, stop_sequence):
9797
"""Prepare and validate stop sequence."""
@@ -130,13 +130,16 @@ def __call_api(self, prompt, return_logits, max_new_tokens, num_samples, stop_se
130130
"messages": prompt,
131131
"max_completion_tokens": max_new_tokens,
132132
"logprobs": return_logits if self.provider == "openai" else None,
133-
"stop": stop_sequence,
134133
"base_url": self.base_url,
135134
"n": num_samples,
136-
"temperature": self.TEMPERATURE,
137-
"top_p": self.TOP_P,
138135
"caching": True,
139136
}
137+
if "o1" in self.model:
138+
logger.warning("O1 models do not support temperature, top_p, stop sequence. Disabling.")
139+
else:
140+
kwargs["temperature"] = self.TEMPERATURE
141+
kwargs["top_p"] = self.TOP_P
142+
kwargs["stop"] = stop_sequence
140143

141144
response = litellm.completion(**kwargs)
142145

0 commit comments

Comments (0)