We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 569aefd · commit 0e3bb54 — Copy full SHA for 0e3bb54
vllm/model_executor/models/transformers.py
@@ -709,6 +709,13 @@ def _can_concat(x: list[torch.Tensor]):
709
MultiModalProcessor,
710
info=MultiModalProcessingInfo,
711
dummy_inputs=MultiModalDummyInputsBuilder)
712
+@support_torch_compile(
713
+ dynamic_arg_dims={
714
+ "input_ids": 0,
715
+ "positions": -1,
716
+ "intermediate_tensors": 0,
717
+ "inputs_embeds": 0,
718
+ }) # set `positions` to last dim to support Qwen-mrope
719
class TransformersForMultimodalLM(TransformersForCausalLM, SupportsMultiModal):
720
# Backwards compatibility for prev released models. State dicts back then
721
# had different formats and cannot be loaded with `AutoModel` mapping as is
0 commit comments