Merged
8 changes: 8 additions & 0 deletions vllm/model_executor/models/qwen2_vl.py
@@ -823,6 +823,14 @@ def get_image_processor(
    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"image": None, "video": None}

    def get_max_tokens_per_item(
            self, seq_len: int,
            mm_counts: Mapping[str, int]) -> Optional[Mapping[str, int]]:

        max_image_tokens = self.get_max_image_tokens()
        max_video_tokens = self.get_max_video_tokens(seq_len, mm_counts)
        return {"image": max_image_tokens, "video": max_video_tokens}
@DarkLight1337 (Member) commented on Jun 19, 2025:
Can you validate whether the startup time is actually reduced (compared to before this PR) after this latest change?

Reply (Member):
@DarkLight1337 Yep that's exactly what I'm going to do next
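
A minimal sketch (not part of this PR) of how the startup-time question above could be checked: time engine initialization before and after this change. The model name and arguments are illustrative placeholders; any multimodal model affected by the slow profiling path would do.

# Rough startup-time measurement; assumes vLLM is installed and the model
# weights are available locally or can be downloaded.
import time

from vllm import LLM

start = time.perf_counter()
llm = LLM(model="Qwen/Qwen2.5-VL-7B-Instruct", max_model_len=8192)
print(f"Engine startup took {time.perf_counter() - start:.1f}s")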


    def _get_vision_info(
        self,
        *,
21 changes: 21 additions & 0 deletions vllm/multimodal/processing.py
@@ -1100,6 +1100,27 @@ def get_allowed_mm_limits(self) -> Mapping[str, int]:

        return allowed_limits

    def get_max_tokens_per_item(
            self, seq_len: int,
            mm_counts: Optional[Mapping[str, int]]
    ) -> Optional[Mapping[str, int]]:
        """Return the maximum number of tokens per item for each modality.

        By default, returns `None`. When `None` is returned, vLLM will generate
        dummy inputs (images/videos) at maximum possible sizes and process them
        to determine the maximum token count per modality.

        This approach works but can be very slow for certain models (e.g.,
        Qwen2.5-VL), leading to very long startup times. For better performance,
        each model can override this method to return pre-computed maximum token
        counts, avoiding the need for dummy input generation and processing.

        NOTE: The maximum number of tokens per item of each modality returned
        from this function should respect the model's maximum sequence length
        and the maximum number of items allowed for each modality, and should
        agree with the dummy inputs (images/videos) at maximum possible sizes.
        """
        return None


_I = TypeVar("_I", bound=BaseProcessingInfo)

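To illustrate the hook described in the docstring above, here is a hypothetical override (the class name and token counts are placeholders, not from this PR), mirroring the Qwen2-VL change earlier in the diff: a model's processing-info class returns pre-computed worst-case token counts so vLLM can skip generating and processing dummy inputs at startup.

from collections.abc import Mapping
from typing import Optional

from vllm.multimodal.processing import BaseProcessingInfo


class MyVLProcessingInfo(BaseProcessingInfo):
    # Sketch only: the other methods required by BaseProcessingInfo are omitted.

    def get_max_tokens_per_item(
            self, seq_len: int,
            mm_counts: Optional[Mapping[str, int]]
    ) -> Optional[Mapping[str, int]]:
        # Placeholder values; a real implementation must keep these consistent
        # with the model's maximum sequence length, the per-modality item
        # limits, and the token counts of maximum-size dummy inputs.
        return {"image": 1024, "video": 4096}
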
22 changes: 21 additions & 1 deletion vllm/multimodal/profiling.py
@@ -253,6 +253,26 @@ def get_mm_max_tokens(
        seq_len: int,
        mm_counts: Optional[Mapping[str, int]] = None,
    ) -> Mapping[str, int]:
        max_tokens_per_item = self.processing_info.get_max_tokens_per_item(
            seq_len=seq_len, mm_counts=mm_counts)
        if max_tokens_per_item is not None:
            if mm_counts is None:
                total_mm_tokens = sum(max_tokens_per_item.values())
            else:
                total_mm_tokens = sum(max_tokens_per_item[k] * mm_counts[k]
                                      for k in max_tokens_per_item.keys()
                                      & mm_counts.keys())
            if total_mm_tokens > seq_len:
                logger.warning_once(
                    "The sequence length (%d) is smaller than the pre-defined"
                    " worst-case total number of multimodal tokens (%d). "
                    "This may cause certain multi-modal inputs to fail during "
                    "inference. To avoid this, you should increase "
                    "`max_model_len` or reduce `mm_counts`.",
                    seq_len,
                    total_mm_tokens,
                )
            return max_tokens_per_item

        mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts)
        return self._get_mm_num_tokens(mm_inputs)
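
As a quick standalone illustration of the check added above (all numbers are invented), the pre-computed per-item maxima are scaled by the per-modality limits and compared against the sequence length:

max_tokens_per_item = {"image": 1024, "video": 4096}
mm_counts = {"image": 2, "video": 1}
seq_len = 4096

total_mm_tokens = sum(max_tokens_per_item[k] * mm_counts[k]
                      for k in max_tokens_per_item.keys() & mm_counts.keys())
# 2 * 1024 + 1 * 4096 = 6144 > 4096, so the warning above would fire.
print(total_mm_tokens, total_mm_tokens > seq_len)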