@@ -1429,7 +1429,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             ml.get_key(LLM_KV_FIRST_K_DENSE_REPLACE, hparams.first_k_dense_replace, 0);

             // Expert gating function (GLM45 uses sigmoid)
-            ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, (uint32_t) LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID);
+            ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, static_cast<uint32_t>(LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID));

             switch (hparams.n_layer) {
                 case 46: type = LLM_TYPE_12B; break; // GLM-4.5-Air
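
For readers skimming the hunk, here is a minimal standalone sketch of what sigmoid expert gating means, as opposed to softmax gating: each expert score is an independent sigmoid of its router logit rather than a share of a cross-expert softmax. This is not llama.cpp code; the actual routing lives in the graph builders, and whether the selected weights are renormalized depends on the model configuration, so treat the renormalization below as an assumption.

```cpp
// Minimal sketch of sigmoid expert gating (assumed behavior, not llama.cpp code):
// score each expert with an independent sigmoid, take the top_k, and
// renormalize the selected scores so they sum to 1.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <utility>
#include <vector>

static std::vector<std::pair<int, float>> sigmoid_gate(const std::vector<float> & logits, int top_k) {
    std::vector<float> scores(logits.size());
    for (size_t e = 0; e < logits.size(); ++e) {
        scores[e] = 1.0f / (1.0f + std::exp(-logits[e])); // per-expert sigmoid, no cross-expert softmax
    }
    std::vector<int> idx(scores.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + top_k, idx.end(),
            [&](int a, int b) { return scores[a] > scores[b]; });
    float sum = 0.0f;
    for (int k = 0; k < top_k; ++k) {
        sum += scores[idx[k]];
    }
    std::vector<std::pair<int, float>> out;
    for (int k = 0; k < top_k; ++k) {
        out.emplace_back(idx[k], scores[idx[k]] / sum); // renormalize over the selected experts
    }
    return out;
}

int main() {
    for (auto [e, w] : sigmoid_gate({0.2f, 1.5f, -0.3f, 0.9f}, 2)) {
        std::printf("expert %d weight %.3f\n", e, w);
    }
}
```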
@@ -4404,7 +4404,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                     // Check if this layer uses MoE or dense FFN based on first_k_dense_replace
-                    const bool use_moe = (hparams.n_expert > 0) && (i >= hparams.first_k_dense_replace);
+                    const bool use_moe = (hparams.n_expert > 0) && (static_cast<uint32_t>(i) >= hparams.first_k_dense_replace);

                     if (use_moe) {
                         // MoE layers
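
The `first_k_dense_replace` convention here follows DeepSeek-style MoE configs: the first k layers keep a dense FFN, and only layers at index >= k get experts. A standalone sketch of the split, assuming GLM-4.5's first_k_dense_replace = 1 (so layer 0 is dense); the expert count of 160 is illustrative:

```cpp
// Standalone sketch (not llama.cpp code) of the dense/MoE layer split:
// layers [0, first_k_dense_replace) are dense, the rest are MoE.
#include <cstdint>
#include <cstdio>

static bool layer_uses_moe(uint32_t n_expert, uint32_t first_k_dense_replace, int il) {
    // same cast as in the diff: make both sides of the comparison unsigned
    return n_expert > 0 && static_cast<uint32_t>(il) >= first_k_dense_replace;
}

int main() {
    // first_k_dense_replace = 1: layer 0 is dense, layers 1.. are MoE
    for (int il = 0; il < 4; ++il) {
        std::printf("layer %d: %s\n", il, layer_uses_moe(160, 1, il) ? "MoE" : "dense");
    }
}
```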
@@ -13445,7 +13445,7 @@ struct llm_build_glm4 : public llm_graph_context {
 struct llm_build_glm45 : public llm_graph_context {
     llm_build_glm45(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
+        // const int64_t n_embd_gqa = hparams.n_embd_v_gqa();

         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -13541,7 +13541,7 @@ struct llm_build_glm45 : public llm_graph_context {
             cb(cur, "ffn_norm", il);

             // Check if this is a dense layer (first_k_dense_replace=1, so layer 0 is dense)
-            if (il < hparams.first_k_dense_replace) {
+            if (static_cast<uint32_t>(il) < hparams.first_k_dense_replace) {
                 // Dense FFN layer
                 cur = build_ffn(cur,
                         model.layers[il].ffn_up, NULL, NULL,
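
The casts in this hunk and in the load_tensors hunk, like the commented-out `n_embd_gqa` above, read as warning fixes: the layer index is a signed `int` while `hparams.first_k_dense_replace` is unsigned, and the mixed comparison is flagged when building with warnings enabled. A minimal reproduction of the issue follows; the flag and warning wording are per GCC/Clang, and the cast is safe here on the assumption that the layer index is never negative.

```cpp
// Sketch of the signedness issue the casts address. Compile with
// -Wall -Wextra (GCC/Clang) to see -Wsign-compare fire on the first form.
#include <cstdint>

int main() {
    int      il = 0;                    // layer index: signed in the builder loops
    uint32_t first_k_dense_replace = 1; // hparam: unsigned

    // bool dense = il < first_k_dense_replace;  // warns: comparison of integer
    //                                           // expressions of different signedness
    bool dense = static_cast<uint32_t>(il) < first_k_dense_replace; // explicit cast, no warning

    return dense ? 0 : 1;
}
```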