We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent e64f8e4 commit 585444b — Copy full SHA for 585444b
src/llama-model.cpp
@@ -12977,7 +12977,7 @@ struct llm_build_hybrid_mamba : public llm_graph_context {
12977
}
12978
12979
// Extract the recurrent cache from the hybrid parent
12980
- const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid *>(memory)->get_child_cache<llama_kv_cache_recurrent>();
+ const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid_recurrent *>(memory)->get_kv_recurrent();
12981
GGML_ASSERT(kv_recurrent);
12982
12983
for (int il = 0; il < n_layer; ++il) {
0 commit comments