Skip to content

Commit fbd8595

Browse files
authored
[Bugfix] Fix basic models tests hanging due to mm processor creation (#22571)
Signed-off-by: Isotr0py <[email protected]>
1 parent 5a16fa6 commit fbd8595

File tree

1 file changed

+23
-5
lines changed

1 file changed

+23
-5
lines changed

vllm/multimodal/registry.py

Lines changed: 23 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -138,8 +138,8 @@ def supports_multimodal_inputs(self, model_config: "ModelConfig") -> bool:
138138
if not model_config.is_multimodal_model:
139139
return False
140140

141-
processor = self.create_processor(model_config, disable_cache=False)
142-
supported_modalities = processor.info.get_supported_mm_limits()
141+
info = self._create_processing_info(model_config, tokenizer=None)
142+
supported_modalities = info.get_supported_mm_limits()
143143

144144
mm_config = model_config.get_multimodal_config()
145145

@@ -278,6 +278,26 @@ def _get_model_cls(self, model_config: "ModelConfig"):
278278
model_cls, _ = get_model_architecture(model_config)
279279
return model_cls
280280

281+
def _create_processing_ctx(
282+
self,
283+
model_config: "ModelConfig",
284+
tokenizer: Optional[AnyTokenizer] = None,
285+
) -> InputProcessingContext:
286+
if tokenizer is None and not model_config.skip_tokenizer_init:
287+
tokenizer = cached_tokenizer_from_config(model_config)
288+
return InputProcessingContext(model_config, tokenizer)
289+
290+
def _create_processing_info(
291+
self,
292+
model_config: "ModelConfig",
293+
*,
294+
tokenizer: Optional[AnyTokenizer] = None,
295+
) -> BaseProcessingInfo:
296+
model_cls = self._get_model_cls(model_config)
297+
factories = self._processor_factories[model_cls]
298+
ctx = self._create_processing_ctx(model_config, tokenizer)
299+
return factories.info(ctx)
300+
281301
def create_processor(
282302
self,
283303
model_config: "ModelConfig",
@@ -291,15 +311,13 @@ def create_processor(
291311
if not model_config.is_multimodal_model:
292312
raise ValueError(f"{model_config.model} is not a multimodal model")
293313

294-
if tokenizer is None and not model_config.skip_tokenizer_init:
295-
tokenizer = cached_tokenizer_from_config(model_config)
296314
if disable_cache is None:
297315
disable_cache = not model_config.enable_mm_processor_cache
298316

299317
model_cls = self._get_model_cls(model_config)
300318
factories = self._processor_factories[model_cls]
301319

302-
ctx = InputProcessingContext(model_config, tokenizer)
320+
ctx = self._create_processing_ctx(model_config, tokenizer)
303321
cache = None if disable_cache else self._get_processor_cache(
304322
model_config)
305323

0 commit comments

Comments (0)