Skip to content

Commit 81aa5ff

Browse files
DarkLight1337 and vadiklyutiy
authored and committed
[Bugfix] Fix multi-api server not working for text models (vllm-project#21933)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent ce4bef5 commit 81aa5ff

File tree

1 file changed

+1
-14
lines changed

1 file changed

+1
-14
lines changed

vllm/config.py

Lines changed: 1 addition & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -856,7 +856,7 @@ def maybe_pull_model_tokenizer_for_s3(self, model: str,
856856
self.tokenizer = s3_tokenizer.dir
857857

858858
def _init_multimodal_config(self) -> Optional["MultiModalConfig"]:
859-
if self.registry.is_multimodal_model(self.architectures, self):
859+
if self._model_info.supports_multimodal:
860860
return MultiModalConfig(
861861
limit_per_prompt=self.limit_mm_per_prompt,
862862
media_io_kwargs=self.media_io_kwargs,
@@ -865,19 +865,6 @@ def _init_multimodal_config(self) -> Optional["MultiModalConfig"]:
865865
disable_mm_preprocessor_cache,
866866
interleave_mm_strings=self.interleave_mm_strings)
867867

868-
if self.limit_mm_per_prompt:
869-
raise ValueError("`limit_mm_per_prompt` is only supported for "
870-
"multimodal models.")
871-
if self.mm_processor_kwargs:
872-
raise ValueError("`mm_processor_kwargs` is only supported for "
873-
"multimodal models.")
874-
if self.disable_mm_preprocessor_cache:
875-
raise ValueError("`disable_mm_preprocessor_cache` is only "
876-
"supported for multimodal models.")
877-
if self.interleave_mm_strings:
878-
raise ValueError("`interleave_mm_strings` is only "
879-
"supported for multimodal models.")
880-
881868
return None
882869

883870
def _get_encoder_config(self):

0 commit comments

Comments (0)