1 parent 3914044 commit 17602b7
src/llama-model.cpp
@@ -12976,7 +12976,7 @@ struct llm_build_hybrid_mamba : public llm_graph_context {
 }

 // Extract the recurrent cache from the hybrid parent
-const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid *>(memory)->get_child_cache<llama_kv_cache_recurrent>();
+const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid_recurrent *>(memory)->get_kv_recurrent();
 GGML_ASSERT(kv_recurrent);

 for (int il = 0; il < n_layer; ++il) {
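
For context, the sketch below illustrates the shape of this change using simplified stand-in types; kv_cache_hybrid_recurrent, kv_cache_recurrent, and get_kv_recurrent here are hypothetical simplifications, not the real llama.cpp classes, whose members and signatures may differ. The idea is that instead of casting the hybrid memory and looking up a child cache through a template parameter, the hybrid-recurrent cache exposes a dedicated, typed accessor for its recurrent child.

// Minimal, self-contained sketch of the design (assumed simplification, not llama.cpp code).
#include <cassert>
#include <memory>

// Stand-in for the recurrent KV cache.
struct kv_cache_recurrent {
    int n_seqs = 0;
};

// Stand-in for the hybrid cache that owns a recurrent child cache.
struct kv_cache_hybrid_recurrent {
    std::unique_ptr<kv_cache_recurrent> recurrent = std::make_unique<kv_cache_recurrent>();

    // Typed accessor: callers get the recurrent child directly instead of
    // naming its concrete type via a template parameter at every call site.
    const kv_cache_recurrent * get_kv_recurrent() const { return recurrent.get(); }
};

int main() {
    kv_cache_hybrid_recurrent memory;

    // Mirrors the new line in the diff: fetch the recurrent child through the
    // dedicated accessor and assert it exists before use.
    const auto * kv_recurrent = memory.get_kv_recurrent();
    assert(kv_recurrent != nullptr);
    return kv_recurrent->n_seqs;
}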