We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 7aaa2bd commit 6364af9
vllm/multimodal/profiling.py
@@ -275,7 +275,7 @@ def get_mm_max_tokens(
275
if total_mm_tokens > seq_len:
276
logger.warning_once(
277
"The sequence length (%d) is smaller than the pre-defined"
278
- " wosrt-case total number of multimodal tokens (%d). "
+ " worst-case total number of multimodal tokens (%d). "
279
"This may cause certain multi-modal inputs to fail during "
280
"inference. To avoid this, you should increase "
281
"`max_model_len` or reduce `mm_counts`.",
0 commit comments