1 parent bab9f6d commit 63424f4
src/lighteval/models/sglang/sglang_model.py
@@ -274,6 +274,11 @@ def _greedy_until(
                         f"{context_size + max_new_tokens=} which is greater than {self.max_length=}. Truncating context to {self.max_length - max_new_tokens} tokens."
                     )
                     context_size = self.max_length - max_new_tokens
+                    if context_size < 0:
+                        logger.critical(
+                            f"{context_size=} is less than 0, either reduce the max_new_tokens or increase model max length."
+                        )
+                        raise ValueError("Context size is less than 0.")
                     inputs = [input[-context_size:] for input in inputs]
             else:
                 if context_size > self.max_length:
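
For readers skimming the hunk, below is a hedged, standalone sketch of the truncation logic this commit guards. The function name `truncate_context`, the module-level `logger`, and the plain `max_length` parameter are illustrative assumptions; in the actual method the values come from `self.max_length` and the tokenized `inputs` inside `_greedy_until`.

```python
import logging

logger = logging.getLogger(__name__)


def truncate_context(
    inputs: list[list[int]],
    context_size: int,
    max_new_tokens: int,
    max_length: int,
) -> list[list[int]]:
    """Left-truncate tokenized inputs so context plus generation fits in max_length.

    Sketch only: names and signature are assumptions, not the library API.
    """
    if context_size + max_new_tokens > max_length:
        logger.warning(
            f"{context_size + max_new_tokens=} which is greater than {max_length=}. "
            f"Truncating context to {max_length - max_new_tokens} tokens."
        )
        context_size = max_length - max_new_tokens
        if context_size < 0:
            # Guard added by this commit: the generation budget alone exceeds
            # the model's maximum length, so truncation cannot help.
            logger.critical(
                f"{context_size=} is less than 0, either reduce the max_new_tokens or increase model max length."
            )
            raise ValueError("Context size is less than 0.")
        # Keep only the last `context_size` tokens of each input.
        inputs = [input_ids[-context_size:] for input_ids in inputs]
    return inputs


# Example: a 10-token prompt with max_new_tokens=8 against a 12-token limit is
# truncated to its last 4 tokens; max_new_tokens=16 would raise ValueError.
print(truncate_context([list(range(10))], context_size=10, max_new_tokens=8, max_length=12))
```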