Commit 041e294

[Misc] add mm_processor_kwargs to extra_body for Qwen2.5-VL (#13533)
1 parent 9621667 commit 041e294

File tree: 4 files changed (+18 -2 lines)

vllm/entrypoints/openai/protocol.py

Lines changed: 4 additions & 0 deletions
@@ -312,6 +312,10 @@ class ChatCompletionRequest(OpenAIBaseModel):
         description=("Additional kwargs to pass to the template renderer. "
                      "Will be accessible by the chat template."),
     )
+    mm_processor_kwargs: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description=("Additional kwargs to pass to the HF processor."),
+    )
     guided_json: Optional[Union[str, dict, BaseModel]] = Field(
         default=None,
         description=("If specified, the output will follow the JSON schema."),

vllm/entrypoints/openai/serving_engine.py

Lines changed: 2 additions & 0 deletions
@@ -451,6 +451,8 @@ async def _preprocess_chat(
             prompt_token_ids=prompt_inputs["prompt_token_ids"])
         if mm_data is not None:
             engine_prompt["multi_modal_data"] = mm_data
+        if request.mm_processor_kwargs is not None:
+            engine_prompt["mm_processor_kwargs"] = request.mm_processor_kwargs

         return conversation, [request_prompt], [engine_prompt]
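
The engine prompt is a plain dict, so the per-request kwargs ride along with the token IDs and multimodal data into the engine. An illustrative sketch of the resulting shape only; the field values below are made up, and the real multi_modal_data holds decoded media rather than strings.

engine_prompt = {
    "prompt_token_ids": [151644, 872, 198],              # tokenized chat prompt
    "multi_modal_data": {"video": ["<decoded frames>"]},  # placeholder payload
    # New in this commit: attached only when the request supplied it.
    "mm_processor_kwargs": {"fps": 2.0},
}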

vllm/model_executor/models/qwen2_5_vl.py

Lines changed: 1 addition & 1 deletion
@@ -689,7 +689,7 @@ def get_hf_processor(
         min_pixels: Optional[int] = None,
         max_pixels: Optional[int] = None,
         size: Optional[dict[str, int]] = None,
-        fps: Optional[float] = None,
+        fps: Optional[Union[float, List[float]]] = None,
         **kwargs: object,
     ) -> Qwen2_5_VLProcessor:
         if fps is not None:
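
The only change here is the widened fps type. A short illustration of the two accepted forms, assuming a request that carries two videos; the values are placeholders.

# Single float: the same sampling rate is applied to every video.
extra_body = {"mm_processor_kwargs": {"fps": 2.0}}

# List of floats: one sampling rate per video, in request order.
extra_body = {"mm_processor_kwargs": {"fps": [2.0, 4.0]}}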

vllm/transformers_utils/processor.py

Lines changed: 11 additions & 1 deletion
@@ -23,6 +23,15 @@ def __hash__(self) -> int: # type: ignore[override]
         return hash(frozenset(self.items()))


+class HashableList(list):
+    """
+    A list that can be hashed by lru_cache.
+    """
+
+    def __hash__(self) -> int: # type: ignore[override]
+        return hash(tuple(self))
+
+
 def _merge_mm_kwargs(model_config: "ModelConfig", **kwargs):
     base_kwargs = model_config.mm_processor_kwargs
     if base_kwargs is None:
@@ -36,7 +45,8 @@ def _merge_mm_kwargs(model_config: "ModelConfig", **kwargs):
     for key, value in merged_kwargs.items():
         if isinstance(value, dict):
             merged_kwargs[key] = HashableDict(value)
-
+        if isinstance(value, list):
+            merged_kwargs[key] = HashableList(value)
     return merged_kwargs
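
HashableList mirrors the existing HashableDict: the merged kwargs feed a processor factory cached with lru_cache, lru_cache hashes its arguments, and a plain list (such as a per-video fps list) is unhashable. A minimal standalone sketch of the idea; load_processor below is a stand-in, not vLLM's actual cached factory.

from functools import lru_cache


class HashableList(list):
    """A list that can be hashed by lru_cache (same definition as the diff)."""

    def __hash__(self) -> int:  # type: ignore[override]
        return hash(tuple(self))


@lru_cache
def load_processor(fps):
    return f"processor(fps={fps})"  # stand-in for building the real HF processor


# load_processor([2.0, 4.0])              # TypeError: unhashable type: 'list'
load_processor(HashableList([2.0, 4.0]))  # works: the list is hashed via tuple(self)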
