@@ -13167,13 +13167,13 @@ struct llm_build_context {
 
             // self-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_enc, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_enc, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_enc, cur);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_enc, cur);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_enc, cur);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_enc, cur);
                 cb(Vcur, "Vcur", il);
 
                 Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13207,7 +13207,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo_enc, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_enc, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13281,13 +13281,13 @@ struct llm_build_context {
 
             // self-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
                 cb(Vcur, "Vcur", il);
 
                 llm_build_kv_store(ctx0, hparams, cparams, kv_self, gf, Kcur, Vcur, n_tokens, kv_head, cb, il);
@@ -13334,7 +13334,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13351,13 +13351,13 @@ struct llm_build_context {
 
             // cross-attention
             {
-                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq_cross, cur);
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_cross, cur);
                 cb(Qcur, "Qcur", il);
 
-                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk_cross, embd_enc);
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_cross, embd_enc);
                 cb(Kcur, "Kcur", il);
 
-                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv_cross, embd_enc);
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_cross, embd_enc);
                 cb(Vcur, "Vcur", il);
 
                 Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -13386,7 +13386,7 @@ struct llm_build_context {
 
                 ggml_build_forward_expand(gf, cur);
 
-                cur = ggml_mul_mat(ctx0, model.layers[il].wo_cross, cur);
+                cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_cross, cur);
                 cb(cur, "kqv_out", il);
             }
 
@@ -13443,7 +13443,7 @@ struct llm_build_context {
             cb(cur, "result_norm", -1);
 
             // lm_head
-            cur = ggml_mul_mat(ctx0, model.output, cur);
+            cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
             cb(cur, "result_output", -1);
         }
 
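Every hunk in this commit follows the same one-line pattern: a direct ggml_mul_mat call is replaced by the llm_build_lora_mm helper, which additionally receives the llama_context (lctx) so that any loaded LoRA adapters can be folded into the matrix multiply when the compute graph is built. For orientation, here is a minimal sketch of what such a helper could look like; the lora_adapters container, the get_weight lookup, and the llama_lora_weight fields a/b are illustrative assumptions, not the verified implementation:

// Sketch of a LoRA-aware matrix multiply in the style of llama.cpp/ggml.
// In addition to the base product w*cur, it adds scale * B*(A*cur) for every
// loaded adapter that carries low-rank factors (a, b) for this weight tensor.
static struct ggml_tensor * llm_build_lora_mm(
        struct llama_context & lctx,
         struct ggml_context * ctx0,
          struct ggml_tensor * w,
          struct ggml_tensor * cur) {
    struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
    for (auto & it : lctx.lora_adapters) {  // assumed map of adapter -> user-set scale
        struct llama_lora_weight * lora = it.first->get_weight(w);  // assumed lookup by base tensor
        if (lora == nullptr) {
            continue;  // this adapter does not patch tensor w
        }
        // Apply the low-rank update as two thin mat-muls, B*(A*cur),
        // instead of ever materializing the full-rank product B*A.
        struct ggml_tensor * ab_cur = ggml_mul_mat(ctx0, lora->b,
                                                   ggml_mul_mat(ctx0, lora->a, cur));
        ab_cur = ggml_scale(ctx0, ab_cur, it.second);
        res    = ggml_add(ctx0, res, ab_cur);
    }
    return res;
}

Centralizing the adapter application inside one graph-build helper is what keeps every call site above a one-line change, covering the encoder, decoder self-attention, cross-attention, and lm_head projections alike.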