4 files changed, 5 insertions(+), 5 deletions(-)

```diff
--- a/include/llama.h
+++ b/include/llama.h
@@ -570,7 +570,7 @@ extern "C" {
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
     // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid(const struct llama_model * model);
+    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
 
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
```
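For context, a minimal caller-side sketch (not part of this PR) of how the renamed predicate composes with the existing `llama_model_is_recurrent()`; `configure_memory` is a hypothetical helper, not a llama.cpp API:

```cpp
#include "llama.h"

// Hypothetical helper: pick a memory layout from the model's architecture
// class using the two public predicates.
static void configure_memory(const struct llama_model * model) {
    if (llama_model_is_hybrid_recurrent(model)) {
        // hybrid-recurrent (e.g. Jamba, Bamba): needs both an attention
        // KV cache and recurrent state buffers
    } else if (llama_model_is_recurrent(model)) {
        // purely recurrent (e.g. Mamba): recurrent state only
    } else {
        // attention-only: standard KV cache
    }
}
```

Checking the hybrid case first keeps the branches mutually exclusive regardless of how the two predicates overlap.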
```diff
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -1767,7 +1767,7 @@ bool llm_arch_is_recurrent(const llm_arch & arch) {
     }
 }
 
-bool llm_arch_is_hybrid(const llm_arch & arch) {
+bool llm_arch_is_hybrid_recurrent(const llm_arch & arch) {
     // TODO: There are currently no hybrid models! Once there are, this will be
     // the place to identify them
     switch (arch) {
```
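Illustrative only: once hybrid-recurrent architectures land, the switch body would enumerate them. `LLM_ARCH_JAMBA` below is a hypothetical enum value that this PR does not add:

```cpp
bool llm_arch_is_hybrid_recurrent(const llm_arch & arch) {
    switch (arch) {
        // case LLM_ARCH_JAMBA: // hypothetical future hybrid-recurrent arch
        //     return true;
        default:
            return false;
    }
}
```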
```diff
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -439,4 +439,4 @@ llm_arch llm_arch_from_string(const std::string & name);
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
 
 bool llm_arch_is_recurrent(const llm_arch & arch);
-bool llm_arch_is_hybrid(const llm_arch & arch);
+bool llm_arch_is_hybrid_recurrent(const llm_arch & arch);
```
```diff
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -13828,8 +13828,8 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid(const llama_model * model) {
-    return llm_arch_is_hybrid(model->arch);
+bool llama_model_is_hybrid_recurrent(const llama_model * model) {
+    return llm_arch_is_hybrid_recurrent(model->arch);
 }
 
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
```