Commit b3c948a

fix: Remove llama_model_is_hybrid_Recurrent public API
#13979 (comment)
Branch: HybridRecurrentCache

Signed-off-by: Gabe Goodhart <[email protected]>
Parent: 74ad4f8

File tree

2 files changed: 0 additions, 7 deletions

include/llama.h

Lines changed: 0 additions & 3 deletions
@@ -569,9 +569,6 @@ extern "C" {
     // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
-    // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
-
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
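After this change, the hybrid-recurrent check is no longer exposed through llama.h; API consumers can only query the generic recurrent property. A minimal sketch of what a caller can still do, assuming the standard public entry points (llama_model_default_params, llama_model_load_from_file, llama_model_free) and a placeholder model path:

#include <stdio.h>
#include "llama.h"

int main(void) {
    // Placeholder path, for illustration only.
    const char * path = "model.gguf";

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file(path, mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load %s\n", path);
        return 1;
    }

    // Still public: the generic recurrent check (Mamba, RWKV, ...).
    // No longer public after this commit: llama_model_is_hybrid_recurrent().
    if (llama_model_is_recurrent(model)) {
        printf("model is recurrent\n");
    }

    llama_model_free(model);
    return 0;
}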

src/llama-model.cpp

Lines changed: 0 additions & 4 deletions
@@ -13849,10 +13849,6 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid_recurrent(const llama_model * model) {
-    return llm_arch_is_hybrid_recurrent(model->arch);
-}
-
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
     return model->tensors_by_name;
 }
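Only the public wrapper is removed here; the architecture-level helper llm_arch_is_hybrid_recurrent that it forwarded to remains inside the library, so internal code can still branch on it. A rough sketch of that kind of internal use; the function name below is hypothetical and the include paths assume the declarations live in the internal llama-model.h / llama-arch.h headers:

#include "llama-model.h"
#include "llama-arch.h"

// Hypothetical internal helper, for illustration only: library code can still
// ask whether an architecture is hybrid-recurrent via llm_arch_is_hybrid_recurrent(),
// even though the public llama_model_* wrapper is gone.
static bool model_needs_hybrid_cache(const llama_model & model) {
    return llm_arch_is_hybrid_recurrent(model.arch);
}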
