
Commit 25373b6

for glm-4.1V update (#22000)
Signed-off-by: Isotr0py <[email protected]>
Signed-off-by: zRzRzRzRzRzRzR <[email protected]>
Co-authored-by: Isotr0py <[email protected]>
1 parent 58eee5f · commit 25373b6

File tree

6 files changed: +24 −16 lines


docs/models/supported_models.md

Lines changed: 2 additions & 1 deletion

```diff
@@ -591,7 +591,8 @@ See [this page](generative_models.md) for more information on how to use generat
 | `Gemma3ForConditionalGeneration` | Gemma 3 | T + I<sup>+</sup> | `google/gemma-3-4b-it`, `google/gemma-3-27b-it`, etc. | ✅︎ | ✅︎ | ⚠️ |
 | `GLM4VForCausalLM`<sup>^</sup> | GLM-4V | T + I | `THUDM/glm-4v-9b`, `THUDM/cogagent-9b-20241220`, etc. | ✅︎ | ✅︎ | ✅︎ |
 | `Glm4vForConditionalGeneration` | GLM-4.1V-Thinking | T + I<sup>E+</sup> + V<sup>E+</sup> | `THUDM/GLM-4.1V-9B-Thinking`, etc. | ✅︎ | ✅︎ | ✅︎ |
-| `Glm4MoeForCausalLM` | GLM-4.5 | T + I<sup>E+</sup> + V<sup>E+</sup> | `THUDM/GLM-4.5`, etc. | ✅︎ | ✅︎ | ✅︎ |
+| `Glm4MoeForCausalLM` | GLM-4.5 | T + I<sup>E+</sup> + V<sup>E+</sup> | `zai-org/GLM-4.5`, etc. | ✅︎ | ✅︎ | ✅︎ |
+| `Glm4v_moeForConditionalGeneration` | GLM-4.5V | T + I<sup>E+</sup> + V<sup>E+</sup> | `zai-org/GLM-4.5V-Air`, etc. | ✅︎ | ✅︎ | ✅︎ |
 | `GraniteSpeechForConditionalGeneration` | Granite Speech | T + A | `ibm-granite/granite-speech-3.3-8b` | ✅︎ | ✅︎ | ✅︎ |
 | `H2OVLChatModel` | H2OVL | T + I<sup>E+</sup> | `h2oai/h2ovl-mississippi-800m`, `h2oai/h2ovl-mississippi-2b`, etc. | | ✅︎ | ✅︎ |
 | `Idefics3ForConditionalGeneration` | Idefics3 | T + I | `HuggingFaceM4/Idefics3-8B-Llama3`, etc. | ✅︎ | | ✅︎ |
```

tests/models/registry.py

Lines changed: 6 additions & 5 deletions

```diff
@@ -377,9 +377,10 @@ def check_available_online(
     "GLM4VForCausalLM": _HfExamplesInfo("THUDM/glm-4v-9b",
                                         trust_remote_code=True,
                                         hf_overrides={"architectures": ["GLM4VForCausalLM"]}),  # noqa: E501
-    "Glm4vForConditionalGeneration": _HfExamplesInfo("THUDM/GLM-4.1V-9B-Thinking", min_transformers_version="4.53"),  # noqa: E501
-    "Glm4MoeForCausalLM": _HfExamplesInfo("THUDM/GLM-4.5",
-                                          min_transformers_version="4.54",
+    "Glm4vForConditionalGeneration": _HfExamplesInfo("THUDM/GLM-4.1V-9B-Thinking"),  # noqa: E501
+    "Glm4MoeForCausalLM": _HfExamplesInfo("zai-org/GLM-4.5",
+                                          min_transformers_version="4.54"),  # noqa: E501
+    "Glm4v_moeForConditionalGeneration": _HfExamplesInfo("zai-org/GLM-4.5V-Air",
                                           is_available_online=False),  # noqa: E501
     "H2OVLChatModel": _HfExamplesInfo("h2oai/h2ovl-mississippi-800m",
                                       extras={"2b": "h2oai/h2ovl-mississippi-2b"},  # noqa: E501
@@ -515,8 +516,8 @@ def check_available_online(
                                         is_available_online=False,
                                         speculative_model="openbmb/MiniCPM-2B-sft-bf16",
                                         tokenizer="openbmb/MiniCPM-2B-sft-bf16"),
-    "Glm4MoeMTPModel": _HfExamplesInfo("THUDM/GLM-4.5",
-                                       speculative_model="THUDM/GLM-4.5",
+    "Glm4MoeMTPModel": _HfExamplesInfo("zai-org/GLM-4.5",
+                                       speculative_model="zai-org/GLM-4.5",
                                        min_transformers_version="4.54",
                                        is_available_online=False),
     "MiMoMTPModel": _HfExamplesInfo("XiaomiMiMo/MiMo-7B-RL",
```

tests/tool_use/test_glm4_moe_tool_parser.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -12,7 +12,7 @@

 pytest.skip("skip glm4_moe parser test", allow_module_level=True)
 # Use a common model that is likely to be available
-MODEL = "THUDM/GLM-4.5"
+MODEL = "zai-org/GLM-4.5"


 @pytest.fixture(scope="module")
```

vllm/model_executor/layers/rotary_embedding.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -1096,7 +1096,7 @@ def get_input_positions_tensor(
                 audio_feature_lengths=audio_feature_lengths,
                 use_audio_in_video=use_audio_in_video,
             )
-        elif "glm4v" in hf_config.model_type:
+        elif hf_config.model_type in ["glm4v", "glm4v_moe"]:
             return cls._glm4v_get_input_positions_tensor(
                 input_tokens=input_tokens,
                 hf_config=hf_config,
```
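Note that the old substring test would already have matched the new type, since `"glm4v"` is a substring of `"glm4v_moe"`; the explicit allow-list makes the dispatch stricter and self-documenting. A quick standalone check (plain Python, no vLLM imports; `"glm4v_custom"` is a hypothetical type added only to show the difference):

```python
for model_type in ["glm4v", "glm4v_moe", "glm4v_custom"]:
    old = "glm4v" in model_type                 # substring match
    new = model_type in ["glm4v", "glm4v_moe"]  # explicit allow-list
    print(f"{model_type}: old={old} new={new}")
# glm4v:        old=True  new=True
# glm4v_moe:    old=True  new=True
# glm4v_custom: old=True  new=False  <- only the allow-list rejects it
```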

vllm/model_executor/models/glm4_1v.py

Lines changed: 13 additions & 8 deletions

```diff
@@ -37,8 +37,7 @@
 import torch.nn.functional as F
 from einops import rearrange
 from transformers import BatchFeature
-from transformers.models.glm4v.configuration_glm4v import (Glm4vConfig,
-                                                           Glm4vVisionConfig)
+from transformers.models.glm4v.configuration_glm4v import Glm4vVisionConfig
 from transformers.models.glm4v.image_processing_glm4v import (
     Glm4vImageProcessor, smart_resize)
 from transformers.models.glm4v.video_processing_glm4v import (
@@ -801,7 +800,7 @@ def load_weights(self, weights: Iterable[tuple[str,
 class Glm4vProcessingInfo(BaseProcessingInfo):

     def get_hf_config(self):
-        return self.ctx.get_hf_config(Glm4vConfig)
+        return self.ctx.get_hf_config()

     def get_tokenizer(self):
         return self.ctx.tokenizer
@@ -1253,7 +1252,7 @@ def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:

     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         super().__init__()
-        config: Glm4vConfig = vllm_config.model_config.hf_config
+        config = vllm_config.model_config.hf_config
         quant_config = vllm_config.quant_config
         multimodal_config = vllm_config.model_config.multimodal_config

@@ -1267,12 +1266,18 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
             prefix=maybe_prefix(prefix, "visual"),
         )

+        if config.model_type == "glm4v":
+            architectures = ["Glm4ForCausalLM"]
+        elif config.model_type == "glm4v_moe":
+            architectures = ["Glm4MoeForCausalLM"]
+        else:
+            architectures = None
+
         self.language_model = init_vllm_registered_model(
             vllm_config=vllm_config,
-            prefix=maybe_prefix(prefix, ""),
-            architectures=["Glm4ForCausalLM"],
-            hf_config=self.config.get_text_config(),
-        )
+            hf_config=config.text_config,
+            prefix=maybe_prefix(prefix, "language_model"),
+            architectures=architectures)

         self.make_empty_intermediate_tensors = (
             self.language_model.make_empty_intermediate_tensors)
```
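The `model_type` branch above lets one vision wrapper host either a dense GLM-4 or an MoE GLM-4.5 text backbone. A minimal standalone sketch of the same dispatch pattern; the function names and registry contents below are placeholders for illustration, not vLLM's API:

```python
from typing import Optional

def pick_architecture(model_type: str) -> Optional[str]:
    # Mirrors the diff: map the HF config's model_type to the
    # registered language-model architecture, else fall through.
    if model_type == "glm4v":
        return "Glm4ForCausalLM"
    elif model_type == "glm4v_moe":
        return "Glm4MoeForCausalLM"
    return None


def init_language_model(architecture: Optional[str]) -> str:
    # Hypothetical stand-in for init_vllm_registered_model.
    registry = {
        "Glm4ForCausalLM": "dense GLM-4 backbone",
        "Glm4MoeForCausalLM": "MoE GLM-4.5 backbone",
    }
    if architecture is None:
        raise ValueError("unknown model_type; cannot pick a backbone")
    return registry[architecture]


print(init_language_model(pick_architecture("glm4v_moe")))  # MoE GLM-4.5 backbone
```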

vllm/model_executor/models/registry.py

Lines changed: 1 addition & 0 deletions

```diff
@@ -206,6 +206,7 @@
     "Gemma3ForConditionalGeneration": ("gemma3_mm", "Gemma3ForConditionalGeneration"),  # noqa: E501
     "GLM4VForCausalLM": ("glm4v", "GLM4VForCausalLM"),
     "Glm4vForConditionalGeneration": ("glm4_1v", "Glm4vForConditionalGeneration"),  # noqa: E501
+    "Glm4v_moeForConditionalGeneration": ("glm4_1v", "Glm4vForConditionalGeneration"),  # noqa: E501
     "GraniteSpeechForConditionalGeneration": ("granite_speech", "GraniteSpeechForConditionalGeneration"),  # noqa: E501
     "H2OVLChatModel": ("h2ovl", "H2OVLChatModel"),
     "InternVLChatModel": ("internvl", "InternVLChatModel"),
```
