
Commit 6afd1a9

llama : add support for lora adapters in T5 model (#8938)
Co-authored-by: Stanisław Szymczyk <[email protected]>
1 parent 272e3bd commit 6afd1a9
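
Note: this change routes every weight projection in the T5 graph (encoder
and decoder self-attention, cross-attention, and the lm_head) through
llm_build_lora_mm instead of a bare ggml_mul_mat. The helper computes the
base projection and then, for each loaded LoRA adapter that provides
weights for the tensor, adds the low-rank update, i.e. roughly
y = W*x + scale * B*(A*x) with scale = user_scale * alpha / rank.
A minimal sketch of the helper, paraphrased from the llama.cpp source of
this era (member names such as lctx.lora_adapters and get_weight are
assumptions and may differ from the exact code):

    // Base matmul plus one scaled low-rank update per active adapter.
    static struct ggml_tensor * llm_build_lora_mm(
            struct llama_context & lctx,
             struct ggml_context * ctx0,
              struct ggml_tensor * w,
              struct ggml_tensor * cur) {
        struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
        for (auto & it : lctx.lora_adapters) {
            // skip adapters that carry no A/B pair for this weight tensor
            struct llama_lora_weight * lora = it.first->get_weight(w);
            if (lora == nullptr) {
                continue;
            }
            const float alpha = it.first->alpha;
            const float rank  = (float) lora->b->ne[0];
            const float scale = alpha ? it.second * alpha / rank : it.second;
            // res += scale * B * (A * cur)
            struct ggml_tensor * ab_cur = ggml_mul_mat(
                    ctx0, lora->b, ggml_mul_mat(ctx0, lora->a, cur));
            ab_cur = ggml_scale(ctx0, ab_cur, scale);
            res    = ggml_add(ctx0, res, ab_cur);
        }
        return res;
    }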

File tree: 1 file changed (+13, −13 lines)

src/llama.cpp

Lines changed: 13 additions & 13 deletions
@@ -13167,13 +13167,13 @@ struct llm_build_context {
 
             // self-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_enc, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_enc, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_enc, cur);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_enc, cur);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_enc, cur);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_enc, cur);
                 cb(Vcur, "Vcur", il);
 
                 Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13207,7 +13207,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo_enc, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_enc, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13281,13 +13281,13 @@ struct llm_build_context {
 
             // self-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
                 cb(Vcur, "Vcur", il);
 
                 llm_build_kv_store(ctx0, hparams, cparams, kv_self, gf, Kcur, Vcur, n_tokens, kv_head, cb, il);
@@ -13334,7 +13334,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13351,13 +13351,13 @@ struct llm_build_context {
 
             // cross-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_cross, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_cross, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_cross, embd_enc);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_cross, embd_enc);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_cross, embd_enc);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_cross, embd_enc);
                 cb(Vcur, "Vcur", il);
 
                 Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13386,7 +13386,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo_cross, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_cross, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13443,7 +13443,7 @@ struct llm_build_context {
             cb(cur, "result_norm", -1);
 
             // lm_head
-            cur = ggml_mul_mat(ctx0, model.output, cur);
+            cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
             cb(cur, "result_output", -1);
         }
 
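For reference, a hedged usage sketch of loading a T5 model plus a LoRA
adapter through the llama.cpp C API as it existed around this commit
(the file names are placeholders; check the current llama.h for
up-to-date names, as the adapter API may have changed since):

    #include "llama.h"

    int main(void) {
        struct llama_model_params mparams = llama_model_default_params();
        struct llama_model * model =
                llama_load_model_from_file("t5.gguf", mparams);

        struct llama_context_params cparams = llama_context_default_params();
        struct llama_context * ctx = llama_new_context_with_model(model, cparams);

        // Attach the adapter with scale 1.0f. After this commit the
        // low-rank updates also apply to T5's encoder, decoder and
        // cross-attention projections, not just decoder-only graphs.
        struct llama_lora_adapter * adapter =
                llama_lora_adapter_init(model, "t5-lora.gguf");
        llama_lora_adapter_set(ctx, adapter, 1.0f);

        // ... run encode/decode as usual ...

        llama_free(ctx);
        llama_free_model(model);
        return 0;
    }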