@@ -54,6 +54,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_DEEPSEEK,         "deepseek"     },
     { LLM_ARCH_DEEPSEEK2,        "deepseek2"    },
     { LLM_ARCH_CHATGLM,          "chatglm"      },
+    { LLM_ARCH_GLM4,             "glm4"         },
     { LLM_ARCH_BITNET,           "bitnet"       },
     { LLM_ARCH_T5,               "t5"           },
     { LLM_ARCH_T5ENCODER,        "t5encoder"    },
@@ -1152,6 +1153,25 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
             { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
         },
     },
+    {
+        LLM_ARCH_GLM4,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
+        },
+    },
     {
         LLM_ARCH_BITNET,
         {
0 commit comments