1 parent b473e95 commit 7c26775
llama.cpp
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }

+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
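For reference, the guard reduces to a simple precondition: per the warning message, flash attention requires the K and V head dimensions to match, so a context that requests flash_attn on a model where they differ falls back to the regular attention path instead of failing later. Below is a minimal standalone sketch of that fallback logic; the hparams_t struct and check_flash_attn() helper are hypothetical names for illustration, not part of the llama.cpp API.

// Minimal sketch of the fallback above; hparams_t and check_flash_attn()
// are illustrative inventions, not llama.cpp API.
#include <cstdint>
#include <cstdio>

struct hparams_t {
    uint32_t n_embd_head_k; // per-head dimension of K
    uint32_t n_embd_head_v; // per-head dimension of V
};

// Returns the effective flash_attn setting: the request is honored only
// when the K and V head dimensions match, mirroring the diff's guard.
static bool check_flash_attn(const hparams_t & hp, bool requested) {
    if (requested && hp.n_embd_head_k != hp.n_embd_head_v) {
        fprintf(stderr, "flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n");
        return false;
    }
    return requested;
}

int main() {
    const hparams_t hp = { 192, 128 }; // mismatched head sizes (illustrative values)
    printf("flash_attn: %s\n", check_flash_attn(hp, true) ? "on" : "off");
    return 0;
}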