We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 770184d · commit 83fb5b8 — Copy full SHA for 83fb5b8
src/llama-kv-cache.cpp
@@ -32,7 +32,7 @@ bool llama_kv_cache_init(
32
33
cache.recurrent = llama_model_is_recurrent(&model);
34
cache.v_trans = !cache.recurrent && !cparams.flash_attn;
35
- cache.can_shift = !cache.recurrent; // not supported due to MLA
+ cache.can_shift = !cache.recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
36
37
LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d\n",
38
__func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, cache.can_shift);
0 commit comments