@@ -1542,6 +1542,9 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     hparams.dec_start_token_id = dec_start_token_id;
                 }
 
+                hparams.dec_n_layer = hparams.n_layer;
+                ml.get_key(LLM_KV_DECODER_BLOCK_COUNT, hparams.dec_n_layer, false);
+
                 switch (hparams.n_layer) {
                     case 6: type = LLM_TYPE_60M; break; // t5-small
                     case 8: type = LLM_TYPE_80M; break; // flan-t5-small
@@ -4414,6 +4417,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                 }
 
+                // n_layer:     number of encoder_layers
+                // dec_n_layer: number of decoder_layers
+                const int dec_n_layer = hparams.dec_n_layer;
+                if (dec_n_layer > n_layer) {
+                    layers.resize(dec_n_layer);
+                }
+
+                // load encoder layers
                 for (int i = 0; i < n_layer; ++i) {
                     auto & layer = layers[i];
 
@@ -4429,6 +4440,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
                     layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                     layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                }
+
+                // load decoder layers
+                for (int i = 0; i < dec_n_layer; ++i) {
+                    auto & layer = layers[i];
 
                     layer.attn_norm  = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM,  "weight", i), {n_embd}, 0);
                     layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
@@ -13509,7 +13525,9 @@ struct llm_build_t5_dec : public llm_graph_context {
 
         ggml_tensor * inp_out_ids = build_inp_out_ids();
 
-        for (int il = 0; il < n_layer; ++il) {
+        const int64_t dec_n_layer = hparams.dec_n_layer;
+
+        for (int il = 0; il < dec_n_layer; ++il) {
             ggml_tensor * inpSA = inpL;
 
             // norm
@@ -13600,7 +13618,7 @@ struct llm_build_t5_dec : public llm_graph_context {
                 //cb(cur, "kqv_out", il);
             }
 
-            if (il == n_layer - 1 && inp_out_ids) {
+            if (il == dec_n_layer - 1 && inp_out_ids) {
                 cur = ggml_get_rows(ctx0, cur, inp_out_ids);
                 inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
             }
@@ -13621,8 +13639,8 @@ struct llm_build_t5_dec : public llm_graph_context {
                         model.layers[il].ffn_gate, NULL, NULL,
                         model.layers[il].ffn_down, NULL, NULL,
                         NULL,
-                        model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
-                        model.layers[il].ffn_gate_enc ? LLM_FFN_PAR  : LLM_FFN_SEQ,
+                        model.layers[il].ffn_gate ? LLM_FFN_GELU : LLM_FFN_RELU,
+                        model.layers[il].ffn_gate ? LLM_FFN_PAR  : LLM_FFN_SEQ,
                         il);
                 cb(cur, "ffn_out", il);
             }
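
Taken together, these hunks let a T5-family GGUF declare a decoder block count that differs from the encoder's: dec_n_layer defaults to n_layer and is only overwritten when the optional decoder-block-count key is present in the metadata. Below is a minimal standalone sketch of that default-then-override pattern; the Hparams struct and get_key_opt helper are hypothetical stand-ins for llama.cpp's llama_hparams and llama_model_loader::get_key, not the actual API.

#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical stand-in for the two hparams fields touched by the patch.
struct Hparams {
    uint32_t n_layer     = 0; // encoder blocks
    uint32_t dec_n_layer = 0; // decoder blocks
};

// Mimics ml.get_key(key, value, /*required=*/false): overwrite only if the key exists.
static bool get_key_opt(const std::optional<uint32_t> & kv, uint32_t & out) {
    if (!kv) {
        return false;
    }
    out = *kv;
    return true;
}

int main() {
    Hparams hp;
    hp.n_layer = 12;                                  // encoder block count read earlier

    hp.dec_n_layer = hp.n_layer;                      // default: symmetric encoder/decoder
    std::optional<uint32_t> decoder_block_count = 24; // pretend the optional key is present
    get_key_opt(decoder_block_count, hp.dec_n_layer); // asymmetric model: decoder keeps 24 blocks

    std::printf("encoder layers: %u, decoder layers: %u\n",
                (unsigned) hp.n_layer, (unsigned) hp.dec_n_layer);
    return 0;
}

The same asymmetry is why the tensor-loading hunk grows the layers vector before the decoder loop: encoder and decoder tensors share the same layers[i] slots, so a decoder that is deeper than the encoder needs layers resized to dec_n_layer before its tensors can be created.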