1 parent 717a5f8 commit ed92013
vllm/model_executor/models/molmo.py
@@ -946,9 +946,12 @@ def pad_images(
 
 
 def input_processor_for_molmo(ctx: InputContext, llm_inputs: LLMInputs):
-    prompt = llm_inputs["prompt"]
-    multi_modal_data = llm_inputs.get("multi_modal_data")
-    image = multi_modal_data.get("image")
+    prompt = llm_inputs.get("prompt", None)
+    multi_modal_data = llm_inputs.get("multi_modal_data", None)
+    if multi_modal_data is not None:
+        image = multi_modal_data.get("image", None)
+    else:
+        image = None
     processor = cached_get_processor(ctx.model_config.model,
                                      trust_remote_code=True,
                                      revision=ctx.model_config.code_revision)
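
For context, the change replaces direct indexing with `.get`-based access so the Molmo input processor tolerates requests that carry no prompt or no multimodal data. Below is a minimal sketch of that access pattern in isolation, using a plain dict in place of vLLM's `LLMInputs` (a TypedDict, so the lookup behavior is the same); the sample inputs and the helper name `extract_inputs` are hypothetical, not part of the commit.

```python
# Sketch of the defensive-access pattern introduced in this commit.
# A plain dict stands in for vLLM's LLMInputs; inputs are illustrative only.

def extract_inputs(llm_inputs: dict):
    prompt = llm_inputs.get("prompt", None)
    multi_modal_data = llm_inputs.get("multi_modal_data", None)
    if multi_modal_data is not None:
        image = multi_modal_data.get("image", None)
    else:
        image = None
    return prompt, image

# Text-only request: no "multi_modal_data" key at all.
print(extract_inputs({"prompt": "Describe the weather."}))
# -> ('Describe the weather.', None)

# Multimodal request carrying an image object (placeholder string here).
print(extract_inputs({"prompt": "What is in this picture?",
                      "multi_modal_data": {"image": "<PIL.Image>"}}))
# -> ('What is in this picture?', '<PIL.Image>')

# The pre-patch code used llm_inputs["prompt"] and multi_modal_data.get("image"),
# which raised KeyError / AttributeError when either key was absent.
```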