Commit 20044ca

Fix log message in scheduler (#652)
1 parent 64f23c2 commit 20044ca

File tree: 1 file changed (+5 −5 lines)


vllm/core/scheduler.py

Lines changed: 5 additions & 5 deletions
@@ -190,13 +190,13 @@ def _schedule(
                     break
 
                 num_prompt_tokens = seq_group.get_seqs()[0].get_len()
-                if num_prompt_tokens > min(
-                        self.scheduler_config.max_model_len,
-                        self.scheduler_config.max_num_batched_tokens):
+                prompt_limit = min(
+                    self.scheduler_config.max_model_len,
+                    self.scheduler_config.max_num_batched_tokens)
+                if num_prompt_tokens > prompt_limit:
                     logger.warning(
                         f"Input prompt ({num_prompt_tokens} tokens) is too long"
-                        " and exceeds limit of "
-                        f"{self.scheduler_config.max_model_len}")
+                        f" and exceeds limit of {prompt_limit}")
                     for seq in seq_group.get_seqs():
                         seq.status = SequenceStatus.FINISHED_IGNORED
                     ignored_seq_groups.append(seq_group)
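
The change computes the effective prompt limit once and reuses it in both the length check and the warning, so the log now reports the limit that was actually enforced (the smaller of max_model_len and max_num_batched_tokens) instead of always printing max_model_len. A minimal standalone sketch of the corrected behavior; the SchedulerConfig stand-in and the numeric values are hypothetical, not taken from the commit:

    # Hypothetical values chosen so that max_num_batched_tokens is the
    # binding limit, which is the case the old log message got wrong.

    class SchedulerConfig:
        def __init__(self, max_model_len: int,
                     max_num_batched_tokens: int) -> None:
            self.max_model_len = max_model_len
            self.max_num_batched_tokens = max_num_batched_tokens

    config = SchedulerConfig(max_model_len=4096, max_num_batched_tokens=2048)
    num_prompt_tokens = 3000  # hypothetical prompt length

    # The effective limit is the smaller of the two bounds.
    prompt_limit = min(config.max_model_len, config.max_num_batched_tokens)
    if num_prompt_tokens > prompt_limit:
        # Before this commit, the message printed max_model_len (4096) here,
        # even though the prompt was rejected for exceeding 2048.
        print(f"Input prompt ({num_prompt_tokens} tokens) is too long"
              f" and exceeds limit of {prompt_limit}")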
