@@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) {
         case LLM_TYPE_236B: return "236B";
         case LLM_TYPE_290B: return "290B";
         case LLM_TYPE_314B: return "314B";
+        case LLM_TYPE_405B: return "405B";
         case LLM_TYPE_671B: return "671B";
         case LLM_TYPE_SMALL: return "0.1B";
         case LLM_TYPE_MEDIUM: return "0.4B";
@@ -582,6 +583,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 switch (hparams.n_layer) {
                     case 32: type = LLM_TYPE_7B; break;
                     case 80: type = LLM_TYPE_70B; break;
+                    case 162: type = LLM_TYPE_405B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
@@ -1848,7 +1850,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                     layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

-                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    }

                     if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                         layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
@@ -1858,9 +1862,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                     }

-                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
-                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    }

                     // optional MLP bias
                     layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
@@ -4705,6 +4711,7 @@ struct llm_build_deci : public llm_graph_context {
             ggml_tensor * inpSA = inpL;
             const int64_t n_head_kv = hparams.n_head_kv(il);
             const int64_t n_head = hparams.n_head(il);
+            const int64_t n_ff = hparams.n_ff(il);

             if (n_head == 0) {
                 // attention-free layer of Llama-3_1-Nemotron-51B
@@ -4780,6 +4787,11 @@ struct llm_build_deci : public llm_graph_context {
                 inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
             }

+            // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
+            if (n_head == 0 && n_ff == 0) {
+                continue;
+            }
+
             // For Granite architecture
             if (hparams.f_residual_scale) {
                 cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
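
Note: the change above keys everything off per-layer hyperparameters. `hparams.n_head(il)` and `hparams.n_ff(il)` can both return 0 for pruned layers of Llama-3_1-Nemotron-Ultra-253B; such a layer has neither attention nor FFN tensors, so no tensors are created for it and the graph builder just carries the residual stream through. The sketch below mirrors that control flow with a hypothetical `hparams_sketch` type and made-up layer counts; it is an illustration of the pattern, not llama.cpp's actual `llama_hparams` implementation.

#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the per-layer hyperparameter lookup used above:
// each layer carries its own n_head / n_ff, and a value of 0 marks the layer
// as attention-free / FFN-free respectively.
struct hparams_sketch {
    uint32_t n_layer = 4;
    std::array<uint32_t, 4> n_head_arr = { 32, 0, 32, 0 };        // 0 => attention-free layer
    std::array<uint32_t, 4> n_ff_arr   = { 14336, 14336, 0, 0 };  // 0 => FFN-free layer

    uint32_t n_head(uint32_t il) const { return n_head_arr[il]; }
    uint32_t n_ff  (uint32_t il) const { return n_ff_arr[il]; }
};

int main() {
    hparams_sketch hparams;

    for (uint32_t il = 0; il < hparams.n_layer; ++il) {
        const int64_t n_head = hparams.n_head(il);
        const int64_t n_ff   = hparams.n_ff(il);

        if (n_head > 0) {
            std::printf("layer %u: build attention (n_head = %lld)\n", il, (long long) n_head);
        }

        // both attention-free and FFN-free: nothing left to build for this
        // layer, keep the residual as-is and move on
        if (n_head == 0 && n_ff == 0) {
            std::printf("layer %u: pass-through (attention-free and FFN-free)\n", il);
            continue;
        }

        if (n_ff > 0) {
            std::printf("layer %u: build FFN (n_ff = %lld)\n", il, (long long) n_ff);
        }
    }
    return 0;
}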