
Commit 7f6e757

use "built-in" instead of "supported"
1 parent 47b0528

3 files changed: 5 additions and 5 deletions

include/llama.h

Lines changed: 2 additions & 2 deletions
@@ -990,8 +990,8 @@ extern "C" {
                            char * buf,
                            int32_t   length);

-    // Get list of supported chat templates
-    int32_t llama_chat_supported_templates(const char ** output, size_t len);
+    // Get list of built-in chat templates
+    int32_t llama_chat_builtin_templates(const char ** output, size_t len);

     //
     // Sampling API

src/llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -22370,7 +22370,7 @@ int32_t llama_chat_apply_template(
     return res;
 }

-int32_t llama_chat_supported_templates(const char ** output, size_t len) {
+int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
     auto it = LLM_CHAT_TEMPLATES.begin();
     for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
         output[i] = it->first.c_str();

tests/test-chat-template.cpp

Lines changed: 2 additions & 2 deletions
@@ -135,10 +135,10 @@ int main(void) {

     // list all supported templates
     std::vector<const char *> supported_tmpl(1024);
-    res = llama_chat_supported_templates(supported_tmpl.data(), supported_tmpl.size());
+    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
     assert(res > 0);
     supported_tmpl.resize(res);
-    printf("Supported templates:\n");
+    printf("Built-in chat templates:\n");
     for (auto tmpl : supported_tmpl) {
         printf(" %s\n", tmpl);
     }
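
For reference, a minimal standalone sketch of how a caller might enumerate the built-in templates with the renamed function. It follows the declaration in include/llama.h and the pattern in tests/test-chat-template.cpp above; the 1024-entry capacity and the non-positive-return check are illustrative assumptions, not part of this commit.

#include <cstdio>
#include <vector>

#include "llama.h"

int main() {
    // Illustrative capacity; mirrors the 1024-entry buffer used in the test above.
    std::vector<const char *> tmpl(1024);

    // Fills `tmpl` with pointers to the built-in template names and returns the count
    // (the test above asserts a positive result and resizes to it).
    int32_t count = llama_chat_builtin_templates(tmpl.data(), tmpl.size());
    if (count <= 0) {
        return 1; // assumption: non-positive means nothing to report
    }

    tmpl.resize(count);
    for (const char * name : tmpl) {
        printf("%s\n", name);
    }
    return 0;
}

The returned pointers come from the keys of the library's internal LLM_CHAT_TEMPLATES map (see the src/llama.cpp hunk), so the caller does not own or free them.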
