Commit 1647e2b

fix

1 parent dcbce53

File tree: 1 file changed (+2, -2 lines)

src/llama.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -595,6 +595,7 @@ static struct ggml_tensor * llm_build_kqv(
             padded_v = ggml_pad(ctx, v, 0, k->ne[0] - v->ne[1], 0, 0);
             cb(padded_v, "padded_v", il);
             n_embd_head_v_out = n_embd_head_k;
+            padded_v = ggml_cont(ctx, padded_v);
         }
 
         cur = ggml_flash_attn_ext(ctx, q, k, padded_v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
@@ -613,8 +614,7 @@ static struct ggml_tensor * llm_build_kqv(
         ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
 
         if (n_embd_head_v < n_embd_head_k) {
-            cur = ggml_cont(ctx, cur);
-            cur = ggml_cont(ctx, ggml_view_3d(ctx, cur, n_embd_head_v, n_head, n_tokens,
+            cur = ggml_cont(ctx, ggml_view_3d(ctx, ggml_cont(ctx, cur), n_embd_head_v, n_head, n_tokens,
                     ggml_element_size(cur) * n_embd_head_v_out,
                     ggml_element_size(cur) * n_embd_head_v_out * n_head,
                     0));
```
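The diff touches the flash-attention path of `llm_build_kqv` for models whose V head size is smaller than their K head size: it forces the zero-padded V tensor into a contiguous buffer before it is handed to `ggml_flash_attn_ext`, and it makes the attention output contiguous before the strided `ggml_view_3d` that slices the heads back down to `n_embd_head_v`, presumably because those ops expect dense layouts here (the commit message says only "fix"). Below is a minimal, self-contained sketch of that cont → view → cont slicing pattern in plain ggml; the shapes (V heads of 128 padded to K heads of 192, 4 heads, 8 tokens) and the standalone program structure are illustrative assumptions, not the upstream code.

```c
#include "ggml.h"
#include <stdio.h>

int main(void) {
    // Enough scratch memory for this small example graph.
    struct ggml_init_params params = {
        /*.mem_size   =*/ 64u * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // Hypothetical sizes: V heads of 128 padded up to K heads of 192.
    const int64_t n_embd_head_v = 128;
    const int64_t n_embd_head_k = 192;
    const int64_t n_head        = 4;
    const int64_t n_tokens      = 8;

    // Stand-in for the attention output, laid out with the padded head size.
    struct ggml_tensor * cur = ggml_new_tensor_3d(ctx, GGML_TYPE_F32,
            n_embd_head_k, n_head, n_tokens);

    // The pattern from the commit: make the tensor contiguous, take a strided
    // 3-D view that keeps only the first n_embd_head_v values of each head,
    // then cont again so downstream ops see a dense tensor.
    cur = ggml_cont(ctx, ggml_view_3d(ctx, ggml_cont(ctx, cur),
            n_embd_head_v, n_head, n_tokens,
            ggml_element_size(cur) * n_embd_head_k,
            ggml_element_size(cur) * n_embd_head_k * n_head,
            0));

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, cur);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    printf("sliced: [%lld, %lld, %lld]\n",
            (long long) cur->ne[0], (long long) cur->ne[1], (long long) cur->ne[2]);

    ggml_free(ctx);
    return 0;
}
```

The inner `ggml_cont` matters because `ggml_view_3d` only reinterprets strides over an existing buffer: the strides computed from `n_embd_head_v_out` are only valid if the source is actually laid out densely, which a permuted or padded intermediate is not guaranteed to be.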
