diff --git a/docs/contributing/model/multimodal.md b/docs/contributing/model/multimodal.md index e123e0dcd155..67cde8df987e 100644 --- a/docs/contributing/model/multimodal.md +++ b/docs/contributing/model/multimodal.md @@ -293,21 +293,22 @@ Assuming that the memory usage increases with the number of tokens, the dummy in self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = \ self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { - "image": - self._get_dummy_images(width=target_width, - height=target_height, - num_images=num_images, - overrides=image_overrides) + "image": self._get_dummy_images( + width=target_width, + height=target_height, + num_images=num_images, + overrides=image_overrides, + ) } ``` @@ -479,17 +480,16 @@ Assuming that the memory usage increases with the number of tokens, the dummy in self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Optional[Mapping[str, BaseDummyOptions]] = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = \ self.info.get_image_size_with_most_features() num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { - "image": - self._get_dummy_images( + "image": self._get_dummy_images( width=target_width, height=target_height, num_images=num_images, diff --git a/tests/models/multimodal/processing/test_audioflamingo3.py b/tests/models/multimodal/processing/test_audioflamingo3.py index d7c00516ffea..428fd9c6eabf 100644 --- a/tests/models/multimodal/processing/test_audioflamingo3.py +++ b/tests/models/multimodal/processing/test_audioflamingo3.py @@ -116,7 +116,7 @@ def test_dummy_data_generation(mock_ctx): builder = AudioFlamingo3DummyInputsBuilder(info) mm_counts = {"audio": 2} - dummy_data = builder.get_dummy_mm_data(100, mm_counts, None) + dummy_data = builder.get_dummy_mm_data(100, mm_counts, {}) assert "audio" in dummy_data assert len(dummy_data["audio"]) == 2 diff --git a/vllm/config/multimodal.py b/vllm/config/multimodal.py index 0a867f1c8537..f95a2e140c67 100644 --- a/vllm/config/multimodal.py +++ b/vllm/config/multimodal.py @@ -2,7 +2,7 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Mapping -from typing import Any, Literal, TypeAlias +from typing import Any, Literal, TypeAlias, TypedDict, final from pydantic import ConfigDict, Field, field_validator, model_validator from pydantic.dataclasses import dataclass @@ -43,11 +43,29 @@ class AudioDummyOptions(BaseDummyOptions): length: int | None = Field(None, gt=0) +@final +class MultiModalDummyOptionsBuiltins(TypedDict, total=False): + """Type annotations for modality types predefined by vLLM.""" + + image: ImageDummyOptions + """Options for dummy images.""" + + video: VideoDummyOptions + """Options for dummy videos.""" + + audio: AudioDummyOptions + """Options for dummy audios.""" + + MMEncoderTPMode = Literal["weights", "data"] MMCacheType = Literal["shm", "lru"] -DummyOptions: TypeAlias = ( - BaseDummyOptions | VideoDummyOptions | ImageDummyOptions | AudioDummyOptions -) +MMDummyOptions: TypeAlias = dict[str, BaseDummyOptions] +""" +A dictionary containing an entry for each 
modality type of dummy data. + +The built-in modalities are defined by +[`MultiModalDummyOptionsBuiltins`][vllm.config.multimodal.MultiModalDummyOptionsBuiltins]. +""" @config @@ -57,7 +75,7 @@ class MultiModalConfig: language_model_only: bool = False """If True, disables all multimodal inputs by setting all modality limits to 0. Equivalent to setting `--limit-mm-per-prompt` to 0 for every modality.""" - limit_per_prompt: dict[str, DummyOptions] = Field(default_factory=dict) + limit_per_prompt: MMDummyOptions = Field(default_factory=dict) """The maximum number of input items and options allowed per prompt for each modality. @@ -158,22 +176,27 @@ class MultiModalConfig: @field_validator("limit_per_prompt", mode="before") @classmethod def _validate_limit_per_prompt( - cls, value: dict[str, int | dict[str, int]] - ) -> dict[str, DummyOptions]: + cls, + value: dict[str, int | dict[str, int]], + ) -> MMDummyOptions: + out: MMDummyOptions = {} + for k, v in value.items(): # Handle legacy format where only count is specified if isinstance(v, int): v = {"count": v} + # Convert to the appropriate DummyOptions subclass if k == "video": - value[k] = VideoDummyOptions(**v) + out[k] = VideoDummyOptions(**v) elif k == "image": - value[k] = ImageDummyOptions(**v) + out[k] = ImageDummyOptions(**v) elif k == "audio": - value[k] = AudioDummyOptions(**v) + out[k] = AudioDummyOptions(**v) else: - value[k] = BaseDummyOptions(**v) - return value + out[k] = BaseDummyOptions(**v) + + return out @field_validator("mm_encoder_attn_backend", mode="before") @classmethod @@ -240,15 +263,8 @@ def get_limit_per_prompt(self, modality: str) -> int: if limit_data is None: # Unspecified modality is set to 999 by default return 999 - return limit_data.count - def get_dummy_options(self, modality: str) -> BaseDummyOptions | None: - """ - Get the configurable dummy data options for a modality. - Returns None if no options are configured for this modality. 
- """ - # All values are now DummyOptions after normalization - return self.limit_per_prompt.get(modality) + return limit_data.count def merge_mm_processor_kwargs( self, diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index fc1720296057..908581786450 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -444,15 +444,14 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: vision_config = self.info.get_vision_config() max_image_size = vision_config.image_size num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/audioflamingo3.py b/vllm/model_executor/models/audioflamingo3.py index 111b99461b2c..e56997fb7267 100644 --- a/vllm/model_executor/models/audioflamingo3.py +++ b/vllm/model_executor/models/audioflamingo3.py @@ -252,16 +252,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - feature_extractor = self.info.get_feature_extractor( - **(mm_processor_kwargs or {}) - ) + feature_extractor = self.info.get_feature_extractor() sampling_rate = feature_extractor.sampling_rate audio_len = MAX_AUDIO_LEN * sampling_rate num_audios = mm_counts.get("audio", 0) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") return { "audio": self._get_dummy_audios( diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py index ce3b990c3ae4..c1806beec108 100644 --- a/vllm/model_executor/models/aya_vision.py +++ b/vllm/model_executor/models/aya_vision.py @@ -191,13 +191,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) image_size = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/bagel.py b/vllm/model_executor/models/bagel.py index 657e8cefb4e4..425342e8b78b 100644 --- a/vllm/model_executor/models/bagel.py +++ b/vllm/model_executor/models/bagel.py @@ -249,8 +249,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) hf_config = self.info.get_hf_config() @@ -258,7 +257,7 @@ def get_dummy_mm_data( # Use the configured image size image_size = vit_config.image_size - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/bee.py 
b/vllm/model_executor/models/bee.py index 5c3a1a4f1f48..ecb645edf4a5 100644 --- a/vllm/model_executor/models/bee.py +++ b/vllm/model_executor/models/bee.py @@ -90,14 +90,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index fe9db19ea6f3..8f79c1aaee0d 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -445,8 +445,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: hf_config = self.info.get_hf_config() vision_config = hf_config.vision_config @@ -454,7 +453,7 @@ def get_dummy_mm_data( max_image_size = vision_config.image_size num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 2c21d70ed1fd..e09a4eac7261 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -116,15 +116,14 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: config = self.info.get_hf_config() width = height = config.vq_config.resolution num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 556c68fc17f5..63c84e890e4f 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -174,14 +174,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/cohere2_vision.py b/vllm/model_executor/models/cohere2_vision.py index 1bcdd41b31a4..69b2abb5fd58 100644 --- a/vllm/model_executor/models/cohere2_vision.py +++ b/vllm/model_executor/models/cohere2_vision.py @@ -197,13 +197,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | 
None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) image_size = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/deepseek_ocr.py b/vllm/model_executor/models/deepseek_ocr.py index 8293d2eced83..b0fba01a4670 100644 --- a/vllm/model_executor/models/deepseek_ocr.py +++ b/vllm/model_executor/models/deepseek_ocr.py @@ -255,8 +255,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) diff --git a/vllm/model_executor/models/deepseek_ocr2.py b/vllm/model_executor/models/deepseek_ocr2.py index 6ababf9f22bf..b57aeeabd4ac 100644 --- a/vllm/model_executor/models/deepseek_ocr2.py +++ b/vllm/model_executor/models/deepseek_ocr2.py @@ -137,8 +137,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) diff --git a/vllm/model_executor/models/deepseek_vl2.py b/vllm/model_executor/models/deepseek_vl2.py index e0de49fb6eae..79279b9d5df7 100644 --- a/vllm/model_executor/models/deepseek_vl2.py +++ b/vllm/model_executor/models/deepseek_vl2.py @@ -214,14 +214,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) max_image_size = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/dots_ocr.py b/vllm/model_executor/models/dots_ocr.py index 4d8acb082592..25b4087d3d9c 100644 --- a/vllm/model_executor/models/dots_ocr.py +++ b/vllm/model_executor/models/dots_ocr.py @@ -106,17 +106,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) - mm_processor_kwargs = mm_processor_kwargs or {} - target_width, target_height = self.info.get_image_size_with_most_features( # noqa: E501 - mm_processor_kwargs.get("max_pixels", None) - ) + target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/ernie45_vl.py b/vllm/model_executor/models/ernie45_vl.py index ab1386e08bc8..1df4adfac159 100644 --- a/vllm/model_executor/models/ernie45_vl.py +++ b/vllm/model_executor/models/ernie45_vl.py @@ -1168,8 +1168,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: 
Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -1179,8 +1178,8 @@ def get_dummy_mm_data( seq_len, mm_counts ) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/funasr.py b/vllm/model_executor/models/funasr.py index a1c70e10e79b..25ede72f1fff 100644 --- a/vllm/model_executor/models/funasr.py +++ b/vllm/model_executor/models/funasr.py @@ -746,23 +746,22 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - feature_extractor = self.info.get_feature_extractor( - **(mm_processor_kwargs or {}) - ) + feature_extractor = self.info.get_feature_extractor() sampling_rate = feature_extractor.sampling_rate audio_len = feature_extractor.chunk_length * sampling_rate num_audios = mm_counts.get("audio", 0) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") return { "audio": self._get_dummy_audios( - length=audio_len, num_audios=num_audios, overrides=audio_overrides - ) + length=audio_len, + num_audios=num_audios, + overrides=audio_overrides, + ), } diff --git a/vllm/model_executor/models/funaudiochat.py b/vllm/model_executor/models/funaudiochat.py index a89a5c104a99..5bcb49e075b3 100644 --- a/vllm/model_executor/models/funaudiochat.py +++ b/vllm/model_executor/models/funaudiochat.py @@ -610,12 +610,9 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - feature_extractor = self.info.get_feature_extractor( - **(mm_processor_kwargs or {}) - ) + feature_extractor = self.info.get_feature_extractor() sampling_rate = int(feature_extractor.sampling_rate) # Dummy inputs are used for profiling; construct the worst-case audio @@ -632,7 +629,7 @@ def get_dummy_mm_data( ) num_audios = int(mm_counts.get("audio", 0)) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") return { "audio": self._get_dummy_audios( length=audio_len, diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index c4f1118f73d0..cc15cee59cfd 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -142,13 +142,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = self.info.get_image_size_with_most_features() num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/gemma3_mm.py 
b/vllm/model_executor/models/gemma3_mm.py index d0a326ccd0be..83a1ae52e29b 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -241,14 +241,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/gemma3n_mm.py b/vllm/model_executor/models/gemma3n_mm.py index 3e4745f7c928..ab5d4ae46d65 100644 --- a/vllm/model_executor/models/gemma3n_mm.py +++ b/vllm/model_executor/models/gemma3n_mm.py @@ -175,8 +175,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_audios = mm_counts.get("audio", 0) @@ -189,8 +188,8 @@ def get_dummy_mm_data( img_width = image_processor.size.get("width", 224) img_height = image_processor.size.get("height", 224) - image_overrides = mm_options.get("image") if mm_options else None - audio_overrides = mm_options.get("audio") if mm_options else None + image_overrides = mm_options.get("image") + audio_overrides = mm_options.get("audio") return { "image": self._get_dummy_images( @@ -200,7 +199,9 @@ def get_dummy_mm_data( overrides=image_overrides, ), "audio": self._get_dummy_audios( - length=audio_len, num_audios=num_audios, overrides=audio_overrides + length=audio_len, + num_audios=num_audios, + overrides=audio_overrides, ), } diff --git a/vllm/model_executor/models/glm4_1v.py b/vllm/model_executor/models/glm4_1v.py index a85d5e6f9f4d..ff76a26bbf0f 100644 --- a/vllm/model_executor/models/glm4_1v.py +++ b/vllm/model_executor/models/glm4_1v.py @@ -1163,8 +1163,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -1174,8 +1173,8 @@ def get_dummy_mm_data( seq_len, mm_counts ) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py index 4d86900e9f92..3513419cb7af 100644 --- a/vllm/model_executor/models/glm4v.py +++ b/vllm/model_executor/models/glm4v.py @@ -492,8 +492,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: hf_config = self.info.get_hf_config() vision_config = hf_config.vision_config @@ -501,7 +500,7 @@ def get_dummy_mm_data( target_width = target_height = 
vision_config["image_size"] num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/glmasr.py b/vllm/model_executor/models/glmasr.py index b7d67b1e49bb..fd47a014a8c1 100644 --- a/vllm/model_executor/models/glmasr.py +++ b/vllm/model_executor/models/glmasr.py @@ -726,15 +726,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - feature_extractor = self.info.get_feature_extractor( - **(mm_processor_kwargs or {}) - ) + feature_extractor = self.info.get_feature_extractor() sampling_rate = feature_extractor.sampling_rate num_audios = mm_counts.get("audio", 0) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") max_audio_len = getattr( self.info.get_hf_processor(), "max_audio_len", DEFAULT_MAX_AUDIO_LEN_S @@ -743,7 +740,9 @@ def get_dummy_mm_data( return { "audio": self._get_dummy_audios( - length=audio_len, num_audios=num_audios, overrides=audio_overrides + length=audio_len, + num_audios=num_audios, + overrides=audio_overrides, ) } diff --git a/vllm/model_executor/models/granite_speech.py b/vllm/model_executor/models/granite_speech.py index 9d37a068385d..393a2be343e0 100644 --- a/vllm/model_executor/models/granite_speech.py +++ b/vllm/model_executor/models/granite_speech.py @@ -216,11 +216,10 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_audios = mm_counts.get("audio", 0) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") return { "audio": self._get_dummy_audios( diff --git a/vllm/model_executor/models/hunyuan_vision.py b/vllm/model_executor/models/hunyuan_vision.py index 50b6bd427701..ac62136b48e0 100644 --- a/vllm/model_executor/models/hunyuan_vision.py +++ b/vllm/model_executor/models/hunyuan_vision.py @@ -590,7 +590,7 @@ def get_hf_processor( self, **kwargs: object, ) -> HunYuanVLProcessor: - return self.ctx.get_hf_processor( + return self.ctx.init_processor( HunYuanVLProcessor, use_fast=kwargs.pop("use_fast", True), **kwargs, @@ -713,8 +713,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 1) diff --git a/vllm/model_executor/models/hyperclovax_vision.py b/vllm/model_executor/models/hyperclovax_vision.py index ea10d764f0f4..1fb0d5e5dd76 100644 --- a/vllm/model_executor/models/hyperclovax_vision.py +++ b/vllm/model_executor/models/hyperclovax_vision.py @@ -165,8 +165,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -174,8 
+173,8 @@ def get_dummy_mm_data( target_width, target_height = self.info.get_image_size_with_most_features() target_num_frames = 32 - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 434bc7318b92..a59c4565499c 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -277,15 +277,14 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) - hf_processor = self.info.get_hf_processor(**(mm_processor_kwargs or {})) + hf_processor = self.info.get_hf_processor() image_processor: Idefics3ImageProcessor = hf_processor.image_processor longest_edge = image_processor.max_image_size["longest_edge"] - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/interns1.py b/vllm/model_executor/models/interns1.py index 5e973aa831ce..549f3ee5499f 100644 --- a/vllm/model_executor/models/interns1.py +++ b/vllm/model_executor/models/interns1.py @@ -297,8 +297,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = self.info.get_image_size_with_most_features() target_num_frames = self.info.get_num_frames_with_most_features( @@ -310,8 +309,8 @@ def get_dummy_mm_data( config = self.info.get_hf_config() image_size_h, image_size_w = config.vision_config.image_size - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 7fbbb7237ae0..a696d2129c28 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -762,13 +762,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = self.info.get_image_size_with_most_features() num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( @@ -935,12 +934,9 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - dummy_image = super().get_dummy_mm_data( - seq_len=seq_len, mm_counts=mm_counts, 
mm_options=mm_options - ) + dummy_image = super().get_dummy_mm_data(seq_len, mm_counts, mm_options) if self.info.supports_video: config = self.info.get_hf_config() image_size: int = config.vision_config.image_size @@ -948,7 +944,7 @@ def get_dummy_mm_data( seq_len, mm_counts ) num_videos = mm_counts.get("video", 0) - video_overrides = mm_options.get("video") if mm_options else None + video_overrides = mm_options.get("video") dummy_video = { "video": self._get_dummy_videos( width=image_size, diff --git a/vllm/model_executor/models/isaac.py b/vllm/model_executor/models/isaac.py index 8ed9ddda4025..f4f7ce45908c 100644 --- a/vllm/model_executor/models/isaac.py +++ b/vllm/model_executor/models/isaac.py @@ -18,6 +18,7 @@ from vllm.config import VllmConfig from vllm.config.model import ModelConfig +from vllm.config.multimodal import BaseDummyOptions from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils from vllm.model_executor.layers.attention import MMEncoderAttention @@ -849,13 +850,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/kanana_v.py b/vllm/model_executor/models/kanana_v.py index b679241b51e5..991fa28d9b7e 100644 --- a/vllm/model_executor/models/kanana_v.py +++ b/vllm/model_executor/models/kanana_v.py @@ -444,8 +444,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) return { diff --git a/vllm/model_executor/models/keye.py b/vllm/model_executor/models/keye.py index 2ae044c287f6..2cb7dc42539d 100644 --- a/vllm/model_executor/models/keye.py +++ b/vllm/model_executor/models/keye.py @@ -1170,8 +1170,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -1179,8 +1178,8 @@ def get_dummy_mm_data( target_width, target_height = self.info.get_image_size_with_most_features() target_num_frames = self.info.get_num_frames_with_most_features(seq_len) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") mm_data = { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/kimi_k25.py b/vllm/model_executor/models/kimi_k25.py index 9d287ba9bac6..248339337fa9 100644 --- a/vllm/model_executor/models/kimi_k25.py +++ b/vllm/model_executor/models/kimi_k25.py @@ -240,8 +240,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, 
object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: # TODO: Support mm_options for vision_chunk to allow user configuration dummy_items = self.get_dummy_mm_items() diff --git a/vllm/model_executor/models/kimi_vl.py b/vllm/model_executor/models/kimi_vl.py index e280f8245b9a..5da8ef980c49 100644 --- a/vllm/model_executor/models/kimi_vl.py +++ b/vllm/model_executor/models/kimi_vl.py @@ -215,12 +215,11 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/lfm2_vl.py b/vllm/model_executor/models/lfm2_vl.py index 3355e4016554..86cd5546bd0d 100644 --- a/vllm/model_executor/models/lfm2_vl.py +++ b/vllm/model_executor/models/lfm2_vl.py @@ -343,14 +343,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index c8ca1815d7b1..e6eb268d6776 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -232,14 +232,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index 6696a0009cd9..54558e123fc9 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -165,8 +165,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_videos = mm_counts.get("video", 0) @@ -175,7 +174,7 @@ def get_dummy_mm_data( seq_len, mm_counts ) - video_overrides = mm_options.get("video") if mm_options else None + video_overrides = mm_options.get("video") return { "video": self._get_dummy_videos( diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 290ace8bff69..f747df09c39f 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -276,8 +276,7 @@ def get_dummy_mm_data( self, seq_len: int, 
mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -287,8 +286,8 @@ def get_dummy_mm_data( seq_len, mm_counts ) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/midashenglm.py b/vllm/model_executor/models/midashenglm.py index 4bba0ad71517..08b955c81562 100644 --- a/vllm/model_executor/models/midashenglm.py +++ b/vllm/model_executor/models/midashenglm.py @@ -565,12 +565,11 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_audios = mm_counts.get("audio", 0) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") return { "audio": self._get_dummy_audios( diff --git a/vllm/model_executor/models/minicpmo.py b/vllm/model_executor/models/minicpmo.py index 33df0f7854bd..f176e50f8840 100644 --- a/vllm/model_executor/models/minicpmo.py +++ b/vllm/model_executor/models/minicpmo.py @@ -301,8 +301,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_audios = mm_counts.get("audio", 0) audio_len = ( @@ -310,11 +309,13 @@ def get_dummy_mm_data( * self.info.get_default_audio_sampling_rate() ) - audio_overrides = mm_options.get("audio") if mm_options else None + audio_overrides = mm_options.get("audio") audio_mm_data = { "audio": self._get_dummy_audios( - length=audio_len, num_audios=num_audios, overrides=audio_overrides + length=audio_len, + num_audios=num_audios, + overrides=audio_overrides, ) } diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index 6a1686100b39..784a03a60834 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -707,8 +707,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -719,8 +718,8 @@ def get_dummy_mm_data( seq_len, mm_counts ) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py index 33d94e9ff6ff..787fdf9000c1 100644 --- a/vllm/model_executor/models/mistral3.py +++ b/vllm/model_executor/models/mistral3.py @@ -236,14 +236,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: 
Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py index 6b3ca695ac62..b08810892006 100644 --- a/vllm/model_executor/models/mllama4.py +++ b/vllm/model_executor/models/mllama4.py @@ -707,14 +707,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) (target_width, target_height) = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index b3689ed19262..ba6d569b7674 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -1274,13 +1274,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = self.info.get_image_size_with_most_features() num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/molmo2.py b/vllm/model_executor/models/molmo2.py index d32c034b5ca5..b2e91616a986 100644 --- a/vllm/model_executor/models/molmo2.py +++ b/vllm/model_executor/models/molmo2.py @@ -2082,8 +2082,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -2094,7 +2093,7 @@ def get_dummy_mm_data( if num_images > 0: target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") dummy_images = self._get_dummy_images( width=target_width, @@ -2110,7 +2109,7 @@ def get_dummy_mm_data( seq_len, mm_counts ) - video_overrides = mm_options.get("video") if mm_options else None + video_overrides = mm_options.get("video") if video_overrides: assert isinstance(video_overrides, VideoDummyOptions) diff --git a/vllm/model_executor/models/nano_nemotron_vl.py b/vllm/model_executor/models/nano_nemotron_vl.py index b4c5f6e6439d..46cf7fe97829 100644 --- a/vllm/model_executor/models/nano_nemotron_vl.py +++ b/vllm/model_executor/models/nano_nemotron_vl.py @@ -1388,8 +1388,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - 
mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) processor = self.info.get_hf_processor() @@ -1404,7 +1403,7 @@ def get_dummy_mm_data( max_num_tiles ) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( @@ -1461,12 +1460,9 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: - dummy_image = super().get_dummy_mm_data( - seq_len=seq_len, mm_counts=mm_counts, mm_options=mm_options - ) + dummy_image = super().get_dummy_mm_data(seq_len, mm_counts, mm_options) if self.info.supports_video: config = self.info.get_hf_config() image_size: int = config.force_image_size @@ -1474,7 +1470,7 @@ def get_dummy_mm_data( seq_len, mm_counts ) num_videos = mm_counts.get("video", 0) - video_overrides = mm_options.get("video") if mm_options else None + video_overrides = mm_options.get("video") dummy_video = { "video": self._get_dummy_videos( width=image_size, diff --git a/vllm/model_executor/models/nemotron_parse.py b/vllm/model_executor/models/nemotron_parse.py index 813675a9237f..fc300a2f9ec6 100644 --- a/vllm/model_executor/models/nemotron_parse.py +++ b/vllm/model_executor/models/nemotron_parse.py @@ -645,8 +645,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) diff --git a/vllm/model_executor/models/nvlm_d.py b/vllm/model_executor/models/nvlm_d.py index 84091895371b..ead24a4e9aa1 100644 --- a/vllm/model_executor/models/nvlm_d.py +++ b/vllm/model_executor/models/nvlm_d.py @@ -92,13 +92,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: target_width, target_height = self.info.get_image_size_with_most_features() num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/ovis.py b/vllm/model_executor/models/ovis.py index 990197cc6744..2807c634b977 100644 --- a/vllm/model_executor/models/ovis.py +++ b/vllm/model_executor/models/ovis.py @@ -306,14 +306,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") mm_data = { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/ovis2_5.py b/vllm/model_executor/models/ovis2_5.py index 9f2098a95281..2d9385c572a0 100644 --- 
a/vllm/model_executor/models/ovis2_5.py +++ b/vllm/model_executor/models/ovis2_5.py @@ -287,8 +287,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) num_videos = mm_counts.get("video", 0) @@ -298,8 +297,8 @@ def get_dummy_mm_data( seq_len, mm_counts ) - image_overrides = mm_options.get("image") if mm_options else None - video_overrides = mm_options.get("video") if mm_options else None + image_overrides = mm_options.get("image") + video_overrides = mm_options.get("video") mm_data = { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/paddleocr_vl.py b/vllm/model_executor/models/paddleocr_vl.py index 2bbe7e850431..6c9304101fba 100644 --- a/vllm/model_executor/models/paddleocr_vl.py +++ b/vllm/model_executor/models/paddleocr_vl.py @@ -206,13 +206,12 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) max_image_size = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index 37beaffef624..458bcfa3c3a1 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -131,8 +131,7 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: hf_config = self.info.get_hf_config() vision_config = hf_config.vision_config @@ -140,7 +139,7 @@ def get_dummy_mm_data( num_images = mm_counts.get("image", 0) - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index a5a346e72298..1466e3861184 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -376,14 +376,13 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, object] | None = None, + mm_options: Mapping[str, BaseDummyOptions], ) -> MultiModalDataDict: num_images = mm_counts.get("image", 0) target_width, target_height = self.info.get_image_size_with_most_features() - image_overrides = mm_options.get("image") if mm_options else None + image_overrides = mm_options.get("image") return { "image": self._get_dummy_images( diff --git a/vllm/model_executor/models/phi4mm.py b/vllm/model_executor/models/phi4mm.py index 89676a9a71ac..5ccac92e35dd 100644 --- a/vllm/model_executor/models/phi4mm.py +++ b/vllm/model_executor/models/phi4mm.py @@ -822,16 +822,15 @@ def get_dummy_mm_data( self, seq_len: int, mm_counts: Mapping[str, int], - mm_options: Mapping[str, BaseDummyOptions] | None = None, - mm_processor_kwargs: Mapping[str, 
object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_audios = mm_counts.get("audio", 0)
         num_images = mm_counts.get("image", 0)

         target_width, target_height = self.info.get_image_size_with_most_features()

-        image_overrides = mm_options.get("image") if mm_options else None
-        audio_overrides = mm_options.get("audio") if mm_options else None
+        image_overrides = mm_options.get("image")
+        audio_overrides = mm_options.get("audio")

         mm_data = {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py
index 0cfa8b6a3a84..ae714dea28b6 100644
--- a/vllm/model_executor/models/pixtral.py
+++ b/vllm/model_executor/models/pixtral.py
@@ -249,14 +249,13 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)

         target_width, target_height = self.info.get_image_size_with_most_features()

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
@@ -271,8 +270,7 @@ def get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> ProcessorInputs:
         tokenizer = self.info.get_tokenizer()
diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py
index 974de80689c4..977b522b5ebf 100644
--- a/vllm/model_executor/models/qwen2_5_omni_thinker.py
+++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py
@@ -357,15 +357,13 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_audios = mm_counts.get("audio", 0)
         num_images = mm_counts.get("image", 0)
         num_videos = mm_counts.get("video", 0)

-        mm_processor_kwargs = mm_processor_kwargs or {}
-        feature_extractor = self.info.get_feature_extractor(**mm_processor_kwargs)
+        feature_extractor = self.info.get_feature_extractor()

         target_audio_length = (
             min(
@@ -375,16 +373,14 @@ def get_dummy_mm_data(
             * feature_extractor.sampling_rate
         )

-        target_width, target_height = self.info.get_image_size_with_most_features(
-            max_pixels=mm_processor_kwargs.get("max_pixels", None),
-        )
+        target_width, target_height = self.info.get_image_size_with_most_features()
         target_num_frames = self.info.get_num_frames_with_most_features(
             seq_len, mm_counts
         )

-        image_overrides = mm_options.get("image") if mm_options else None
-        video_overrides = mm_options.get("video") if mm_options else None
-        audio_overrides = mm_options.get("audio") if mm_options else None
+        image_overrides = mm_options.get("image")
+        video_overrides = mm_options.get("video")
+        audio_overrides = mm_options.get("audio")

         mm_data = {
             "audio": self._get_dummy_audios(
diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py
index 52c798e83f1e..053e8bb85a3f 100644
--- a/vllm/model_executor/models/qwen2_audio.py
+++ b/vllm/model_executor/models/qwen2_audio.py
@@ -195,22 +195,21 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
-        feature_extractor = self.info.get_feature_extractor(
-            **(mm_processor_kwargs or {})
-        )
+        feature_extractor = self.info.get_feature_extractor()

         sampling_rate = feature_extractor.sampling_rate
         audio_len = feature_extractor.chunk_length * sampling_rate
         num_audios = mm_counts.get("audio", 0)

-        audio_overrides = mm_options.get("audio") if mm_options else None
+        audio_overrides = mm_options.get("audio")

         return {
             "audio": self._get_dummy_audios(
-                length=audio_len, num_audios=num_audios, overrides=audio_overrides
+                length=audio_len,
+                num_audios=num_audios,
+                overrides=audio_overrides,
             )
         }
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index c530493b1df1..eed559bcb87c 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -925,9 +925,14 @@ def get_image_size_with_most_features(
         vision_config = hf_config.vision_config
         patch_size = vision_config.patch_size
         merge_size = vision_config.spatial_merge_size
+
         if max_pixels is None:
             image_processor = self.get_image_processor()
-            max_pixels = image_processor.size["longest_edge"]
+
+            mm_kwargs = self.ctx.get_merged_mm_kwargs({})
+            size = mm_kwargs.get("size", image_processor.size)
+            max_pixels = size["longest_edge"]
+
         unit = patch_size * merge_size
         max_seq_len = max_pixels // (unit * unit)
@@ -1027,22 +1032,18 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)
         num_videos = mm_counts.get("video", 0)

-        mm_processor_kwargs = mm_processor_kwargs or {}
-        target_width, target_height = self.info.get_image_size_with_most_features(
-            max_pixels=mm_processor_kwargs.get("max_pixels", None)
-        )
+        target_width, target_height = self.info.get_image_size_with_most_features()
         target_num_frames = self.info.get_num_frames_with_most_features(
             seq_len, mm_counts
         )

-        image_overrides = mm_options.get("image") if mm_options else None
-        video_overrides = mm_options.get("video") if mm_options else None
+        image_overrides = mm_options.get("image")
+        video_overrides = mm_options.get("video")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/qwen3_asr.py b/vllm/model_executor/models/qwen3_asr.py
index 5f56088cb31f..443da955dcc6 100644
--- a/vllm/model_executor/models/qwen3_asr.py
+++ b/vllm/model_executor/models/qwen3_asr.py
@@ -146,14 +146,11 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_audios = mm_counts.get("audio", 0)

-        feature_extractor = self.info.get_feature_extractor(
-            **(mm_processor_kwargs or {})
-        )
+        feature_extractor = self.info.get_feature_extractor()

         target_audio_length = (
             min(
@@ -163,7 +160,7 @@ def get_dummy_mm_data(
             * feature_extractor.sampling_rate
         )

-        audio_overrides = mm_options.get("audio") if mm_options else None
+        audio_overrides = mm_options.get("audio")

         return {
             "audio": self._get_dummy_audios(
diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py
index abb38a648973..1a017e56161e 100644
--- a/vllm/model_executor/models/qwen3_vl.py
+++ b/vllm/model_executor/models/qwen3_vl.py
@@ -703,11 +703,18 @@ def get_max_video_tokens(
         mm_counts: Mapping[str, int],
     ) -> int:
         video_processor = self.get_video_processor()
-        video_max_pixels = video_processor.size["longest_edge"]
+
+        mm_kwargs = self.ctx.get_merged_mm_kwargs({})
+        video_size = mm_kwargs.get("size", video_processor.size)
+        temporal_patch_size = mm_kwargs.get(
+            "temporal_patch_size", video_processor.temporal_patch_size
+        )
+
         # video_max_pixels contains the temporal compression factor,
         # so we divide by 2 to get the maximum number of image pixels.
+        video_max_pixels = video_size["longest_edge"]
         target_width, target_height = self.get_image_size_with_most_features(
-            max_pixels=video_max_pixels // video_processor.temporal_patch_size
+            max_pixels=video_max_pixels // temporal_patch_size
         )
         num_video_soft_tokens = self.get_num_video_tokens(
             image_width=target_width,
@@ -789,19 +796,15 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)
         num_videos = mm_counts.get("video", 0)

-        image_overrides = mm_options.get("image") if mm_options else None
-        video_overrides = mm_options.get("video") if mm_options else None
+        image_overrides = mm_options.get("image")
+        video_overrides = mm_options.get("video")

-        mm_processor_kwargs = mm_processor_kwargs or {}
         target_image_width, target_image_height = (
-            self.info.get_image_size_with_most_features(
-                max_pixels=mm_processor_kwargs.get("max_pixels", None),
-            )
+            self.info.get_image_size_with_most_features()
         )

         # treat videos as special images
@@ -826,13 +829,20 @@ def get_dummy_mm_data(
             target_num_frames = min(target_num_frames, num_frames_override)
         target_num_frames = max(target_num_frames, 2)

-        video_processor = self.info.get_video_processor(**(mm_processor_kwargs or {}))
-        video_max_pixels = video_processor.size["longest_edge"]
+        video_processor = self.info.get_video_processor()
+
+        mm_kwargs = self.info.ctx.get_merged_mm_kwargs({})
+        video_size = mm_kwargs.get("size", video_processor.size)
+        temporal_patch_size = mm_kwargs.get(
+            "temporal_patch_size", video_processor.temporal_patch_size
+        )
+
         # video_max_pixels contains the temporal compression factor,
         # so we divide by 2 to get the maximum number of image pixels.
+        video_max_pixels = video_size["longest_edge"]
         target_video_width, target_video_height = (
             self.info.get_image_size_with_most_features(
-                max_pixels=video_max_pixels // video_processor.temporal_patch_size
+                max_pixels=video_max_pixels // temporal_patch_size
             )
         )
         target_video_size, _ = self.info._get_vision_info(
diff --git a/vllm/model_executor/models/qwen_vl.py b/vllm/model_executor/models/qwen_vl.py
index 66b669a9cc36..8ac541f73d8b 100644
--- a/vllm/model_executor/models/qwen_vl.py
+++ b/vllm/model_executor/models/qwen_vl.py
@@ -617,8 +617,7 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         hf_config = self.info.get_hf_config()
         vision_config = hf_config.visual
@@ -626,7 +625,7 @@ def get_dummy_mm_data(
         target_width = target_height = vision_config["image_size"]
         num_images = mm_counts.get("image", 0)

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/rvl.py b/vllm/model_executor/models/rvl.py
index f6ddaa8fadda..72f68659c72b 100644
--- a/vllm/model_executor/models/rvl.py
+++ b/vllm/model_executor/models/rvl.py
@@ -40,14 +40,13 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)

         target_width, target_height = self.info.get_image_size_with_most_features()

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py
index 8e07a90e893e..c315151308c6 100644
--- a/vllm/model_executor/models/siglip.py
+++ b/vllm/model_executor/models/siglip.py
@@ -158,14 +158,13 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)

         target_width, target_height = self.info.get_image_size_with_most_features()

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/skyworkr1v.py b/vllm/model_executor/models/skyworkr1v.py
index acedb04bcb9e..128428746386 100644
--- a/vllm/model_executor/models/skyworkr1v.py
+++ b/vllm/model_executor/models/skyworkr1v.py
@@ -472,7 +472,7 @@ def __call__(

 class SkyworkR1VProcessingInfo(BaseProcessingInfo):
     def get_hf_processor(self, **kwargs: object) -> SkyworkR1VProcessor:
-        return self.ctx.init_processor(
+        return self.ctx.get_hf_processor(
             SkyworkR1VProcessor,
             config=self.get_hf_config(),
             tokenizer=self.get_tokenizer(),
@@ -529,13 +529,12 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         target_width, target_height = self.info.get_image_size_with_most_features()
         num_images = mm_counts.get("image", 0)

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/step3_vl.py b/vllm/model_executor/models/step3_vl.py
index 8050f6b850d9..eee1130ccd12 100644
--- a/vllm/model_executor/models/step3_vl.py
+++ b/vllm/model_executor/models/step3_vl.py
@@ -564,13 +564,12 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         target_width, target_height = self.info.get_image_size_with_most_features()
         num_images = mm_counts.get("image", 0)

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/terratorch.py b/vllm/model_executor/models/terratorch.py
index 1cf65abd649e..a3a4030af818 100644
--- a/vllm/model_executor/models/terratorch.py
+++ b/vllm/model_executor/models/terratorch.py
@@ -154,8 +154,7 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         # Dummy data is generated based on the 'input' section
         # defined in the HF configuration file
diff --git a/vllm/model_executor/models/transformers/multimodal.py b/vllm/model_executor/models/transformers/multimodal.py
index 3b1eb7db8cca..a645679e0cd6 100644
--- a/vllm/model_executor/models/transformers/multimodal.py
+++ b/vllm/model_executor/models/transformers/multimodal.py
@@ -101,14 +101,13 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, "BaseDummyOptions"] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, "BaseDummyOptions"],
     ) -> MultiModalDataDict:
         num_images = mm_counts.get("image", 0)

         target_width, target_height = self.info.get_max_image_size()

-        image_overrides = mm_options.get("image") if mm_options else None
+        image_overrides = mm_options.get("image")

         return {
             "image": self._get_dummy_images(
diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py
index cf8267d2077b..4ac6361102d9 100644
--- a/vllm/model_executor/models/ultravox.py
+++ b/vllm/model_executor/models/ultravox.py
@@ -164,12 +164,9 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
-        feature_extractor = self.info.get_feature_extractor(
-            **(mm_processor_kwargs or {})
-        )
+        feature_extractor = self.info.get_feature_extractor()

         sampling_rate = feature_extractor.sampling_rate
         audio_len = (
@@ -177,11 +174,13 @@ def get_dummy_mm_data(
         )
         num_audios = mm_counts.get("audio", 0)

-        audio_overrides = mm_options.get("audio") if mm_options else None
+        audio_overrides = mm_options.get("audio")

         return {
             "audio": self._get_dummy_audios(
-                length=audio_len, num_audios=num_audios, overrides=audio_overrides
+                length=audio_len,
+                num_audios=num_audios,
+                overrides=audio_overrides,
             )
         }
diff --git a/vllm/model_executor/models/voxtral.py b/vllm/model_executor/models/voxtral.py
index a4dcc1b413c1..8cbba09d46b0 100644
--- a/vllm/model_executor/models/voxtral.py
+++ b/vllm/model_executor/models/voxtral.py
@@ -218,18 +218,19 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         num_audios = mm_counts.get("audio", 0)

         target_length = self.info.get_max_audio_array_len()

-        audio_overrides = mm_options.get("audio") if mm_options else None
+        audio_overrides = mm_options.get("audio")

         return {
             "audio": self._get_dummy_audios(
-                length=target_length, num_audios=num_audios, overrides=audio_overrides
+                length=target_length,
+                num_audios=num_audios,
+                overrides=audio_overrides,
             )
         }
@@ -237,8 +238,7 @@ def get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> ProcessorInputs:
         tokenizer = self.info.get_tokenizer()
diff --git a/vllm/model_executor/models/whisper.py b/vllm/model_executor/models/whisper.py
index 96818e264fbf..2f7c4580ac68 100644
--- a/vllm/model_executor/models/whisper.py
+++ b/vllm/model_executor/models/whisper.py
@@ -695,22 +695,21 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
-        feature_extractor = self.info.get_feature_extractor(
-            **(mm_processor_kwargs or {})
-        )
+        feature_extractor = self.info.get_feature_extractor()

         sampling_rate = feature_extractor.sampling_rate
         audio_len = feature_extractor.chunk_length * sampling_rate
         num_audios = mm_counts.get("audio", 0)

-        audio_overrides = mm_options.get("audio") if mm_options else None
+        audio_overrides = mm_options.get("audio")

         return {
             "audio": self._get_dummy_audios(
-                length=audio_len, num_audios=num_audios, overrides=audio_overrides
+                length=audio_len,
+                num_audios=num_audios,
+                overrides=audio_overrides,
             )
         }
diff --git a/vllm/multimodal/processing/context.py b/vllm/multimodal/processing/context.py
index b131ee3c49a8..6f4ce77bccbe 100644
--- a/vllm/multimodal/processing/context.py
+++ b/vllm/multimodal/processing/context.py
@@ -266,11 +266,14 @@ def get_hf_processor(
         if isinstance(tokenizer, MistralTokenizer):
             tokenizer = tokenizer.transformers_tokenizer

+        merged_kwargs = self.get_merged_mm_kwargs(kwargs)
+        merged_kwargs.pop("tokenizer", None)
+
         return cached_processor_from_config(
             self.model_config,
             processor_cls=typ,
             tokenizer=tokenizer,
-            **kwargs,
+            **merged_kwargs,
         )

     def init_processor(
@@ -283,12 +286,7 @@ def init_processor(
         Initialize a HuggingFace-like processor class, merging the keyword
         arguments with those in the model's configuration.
         """
-        mm_config = self.model_config.get_multimodal_config()
-        base_kwargs = mm_config.mm_processor_kwargs
-        if base_kwargs is None:
-            base_kwargs = {}
-
-        merged_kwargs = {**base_kwargs, **kwargs}
+        merged_kwargs = self.get_merged_mm_kwargs(kwargs)

         return typ(**merged_kwargs)
diff --git a/vllm/multimodal/processing/dummy_inputs.py b/vllm/multimodal/processing/dummy_inputs.py
index 0b02861e321f..914395863ced 100644
--- a/vllm/multimodal/processing/dummy_inputs.py
+++ b/vllm/multimodal/processing/dummy_inputs.py
@@ -62,8 +62,7 @@ def get_dummy_mm_data(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> MultiModalDataDict:
         """
         Build the multimodal input which, after processing, results in
@@ -83,8 +82,7 @@ def get_dummy_processor_inputs(
         self,
         seq_len: int,
         mm_counts: Mapping[str, int],
-        mm_options: Mapping[str, BaseDummyOptions] | None = None,
-        mm_processor_kwargs: Mapping[str, object] | None = None,
+        mm_options: Mapping[str, BaseDummyOptions],
     ) -> ProcessorInputs:
         """
         Build the input which, after processing, results in
@@ -94,16 +92,9 @@ def get_dummy_processor_inputs(
             seq_len: Sequence length
             mm_counts: Count of items per modality
             mm_options: Configurable options per modality (optional)
-            mm_processor_kwargs: Additional keyword arguments
-                for hf_processor (optional)
         """
         dummy_text = self.get_dummy_text(mm_counts)
-        dummy_mm_data = self.get_dummy_mm_data(
-            seq_len,
-            mm_counts,
-            mm_options,
-            mm_processor_kwargs=mm_processor_kwargs,
-        )
+        dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts, mm_options)
         dummy_mm_items = self.info.parse_mm_data(dummy_mm_data, validate=False)

         tokenization_kwargs = {"truncation": False}
@@ -111,7 +102,6 @@ def get_dummy_processor_inputs(
         return ProcessorInputs(
             prompt=dummy_text,
             mm_items=dummy_mm_items,
-            hf_processor_mm_kwargs=mm_processor_kwargs or {},
             tokenization_kwargs=tokenization_kwargs,
         )
diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py
index 340754d16a57..540b42f0e755 100644
--- a/vllm/multimodal/registry.py
+++ b/vllm/multimodal/registry.py
@@ -5,7 +5,6 @@
 from multiprocessing.synchronize import Lock as LockType
 from typing import TYPE_CHECKING, Generic, Literal, Protocol, TypeVar, cast

-from vllm.config.multimodal import BaseDummyOptions
 from vllm.config.observability import ObservabilityConfig
 from vllm.logger import init_logger
 from vllm.tokenizers import TokenizerLike, cached_tokenizer_from_config
@@ -99,27 +98,6 @@ class MultiModalRegistry:
     A registry that dispatches data processing according to the model.
     """

-    def _extract_mm_options(
-        self,
-        model_config: "ModelConfig",
-    ) -> Mapping[str, BaseDummyOptions] | None:
-        """
-        Extract multimodal dummy options from model config.
-
-        Returns None if no configurable options are found, otherwise returns
-        a mapping of modality names to their dummy options.
-        """
-        if not model_config.multimodal_config:
-            return None
-
-        mm_options = {
-            m: opt
-            for m in model_config.multimodal_config.limit_per_prompt
-            if (opt := model_config.multimodal_config.get_dummy_options(m)) is not None
-        }
-
-        return mm_options if len(mm_options) > 0 else None
-
     def supports_multimodal_inputs(self, model_config: "ModelConfig") -> bool:
         """
         Checks if the model supports multimodal inputs.
@@ -261,8 +239,7 @@ def get_dummy_mm_inputs(
         processor_inputs = processor.dummy_inputs.get_dummy_processor_inputs(
             seq_len=seq_len,
             mm_counts=mm_counts,
-            mm_options=self._extract_mm_options(model_config),
-            mm_processor_kwargs=mm_config.mm_processor_kwargs,
+            mm_options=mm_config.limit_per_prompt,
         )
         mm_inputs = processor.apply(
             prompt=processor_inputs.prompt,