
Commit db9dd0c

Author: ochafik
Parent: 153e852

Finish suggested renamings

The renamings, across common and the examples: llama_chat_templates_from_model → common_chat_templates_from_model; the default_template and tool_use_template members → template_default and template_tool_use; remaining uses of the bos_token / eos_token locals → token_bos / token_eos.

File tree

5 files changed: +29 -29 lines

common/common.cpp
common/common.h
examples/main/main.cpp
examples/run/run.cpp
examples/server/server.cpp


common/common.cpp

Lines changed: 7 additions & 7 deletions
@@ -1827,7 +1827,7 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
     auto token_bos = common_token_to_piece(vocab, llama_vocab_bos(vocab), true);
     auto token_eos = common_token_to_piece(vocab, llama_vocab_eos(vocab), true);
     std::string default_template_src = chat_template_override;
-    std::string tool_use_template_src = chat_template_override;
+    std::string template_tool_use_src = chat_template_override;
     bool has_explicit_template = !chat_template_override.empty();
     if (chat_template_override.empty()) {
         auto str = llama_model_chat_template(model, /* name */ nullptr);
@@ -1837,13 +1837,13 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
         }
         str = llama_model_chat_template(model, /* name */ "tool_use");
         if (str) {
-            tool_use_template_src = str;
+            template_tool_use_src = str;
             has_explicit_template = true;
         }
     }
     if (default_template_src.empty() || default_template_src == "chatml") {
-        if (!tool_use_template_src.empty()) {
-            default_template_src = tool_use_template_src;
+        if (!template_tool_use_src.empty()) {
+            default_template_src = template_tool_use_src;
         } else {
             default_template_src = R"(
                 {%- for message in messages -%}
@@ -1857,10 +1857,10 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
     }
     return {
         has_explicit_template,
-        std::make_unique<minja::chat_template>(default_template_src, bos_token, eos_token),
-        tool_use_template_src.empty()
+        std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),
+        template_tool_use_src.empty()
             ? nullptr
-            : std::make_unique<minja::chat_template>(tool_use_template_src, bos_token, eos_token)
+            : std::make_unique<minja::chat_template>(template_tool_use_src, token_bos, token_eos)
     };
 }
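For context, the aggregate initialization in the return statement above implies a holder struct along these lines. This is a sketch inferred from this commit's diffs (the member names appear in main.cpp and server.cpp below), not the verbatim definition from common/common.h, and the field order is assumed to match the initializer:

// Sketch of the renamed holder struct, inferred from the return statement in
// common_chat_templates_from_model() and from member accesses elsewhere in
// this commit. Not the verbatim definition.
struct llama_chat_templates {
    bool has_explicit_template = false;                       // a model- or user-supplied template exists
    std::unique_ptr<minja::chat_template> template_default;   // always set after construction
    std::unique_ptr<minja::chat_template> template_tool_use;  // nullptr when the model has no "tool_use" template
};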

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -632,7 +632,7 @@ std::string common_chat_format_single(
 std::string common_chat_format_example(
         const llama_chat_template & tmpl, bool use_jinja);

-llama_chat_templates llama_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);
+llama_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override);

 //
 // KV cache utils
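A minimal caller sketch for the renamed entry point, mirroring how examples/server/server.cpp (further below) selects the tool-use variant per request; model and has_tools are hypothetical stand-ins for the caller's own state:

// Hypothetical usage of common_chat_templates_from_model(): load both
// templates once, then pick the tool-use variant when tools are present.
// `model` and `has_tools` are placeholders, not names from this commit.
auto templates = common_chat_templates_from_model(model, /* chat_template_override = */ "");
GGML_ASSERT(templates.template_default);

const auto & tmpl = (has_tools && templates.template_tool_use)
    ? *templates.template_tool_use
    : *templates.template_default;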

examples/main/main.cpp

Lines changed: 4 additions & 4 deletions
@@ -158,7 +158,7 @@ int main(int argc, char ** argv) {
     }

     const llama_vocab * vocab = llama_model_get_vocab(model);
-    auto chat_templates = llama_chat_templates_from_model(model, params.chat_template);
+    auto chat_templates = common_chat_templates_from_model(model, params.chat_template);

     LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);

@@ -201,7 +201,7 @@ int main(int argc, char ** argv) {
     }

     // auto enable conversation mode if chat template is available
-    const bool has_chat_template = chat_templates.has_explicit_template && chat_templates.default_template;
+    const bool has_chat_template = chat_templates.has_explicit_template && chat_templates.template_default;
     if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
         if (has_chat_template) {
             LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
@@ -219,7 +219,7 @@ int main(int argc, char ** argv) {
     // print chat template example in conversation mode
     if (params.conversation_mode) {
         if (params.enable_chat_template) {
-            LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(*chat_templates.default_template, params.use_jinja).c_str());
+            LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(*chat_templates.template_default, params.use_jinja).c_str());
         } else {
             LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
         }
@@ -265,7 +265,7 @@ int main(int argc, char ** argv) {

     auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
         common_chat_msg new_msg{role, content};
-        auto formatted = common_chat_format_single(*chat_templates.default_template, chat_msgs, new_msg, role == "user", g_params->use_jinja);
+        auto formatted = common_chat_format_single(*chat_templates.template_default, chat_msgs, new_msg, role == "user", g_params->use_jinja);
         chat_msgs.push_back({role, content});
         LOG_DBG("formatted: '%s'\n", formatted.c_str());
         return formatted;

examples/run/run.cpp

Lines changed: 4 additions & 4 deletions
@@ -936,8 +936,8 @@ static int get_user_input(std::string & user_input, const std::string & user) {
 static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_jinja) {
     int prev_len = 0;
     llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get()));
-    auto chat_templates = llama_chat_templates_from_model(llama_data.model.get(), "");
-    GGML_ASSERT(chat_templates.default_template);
+    auto chat_templates = common_chat_templates_from_model(llama_data.model.get(), "");
+    GGML_ASSERT(chat_templates.template_default);
     static const bool stdout_a_terminal = is_stdout_a_terminal();
     while (true) {
         // Get user input
@@ -948,7 +948,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_

         add_message("user", user.empty() ? user_input : user, llama_data);
         int new_len;
-        if (apply_chat_template_with_error_handling(*chat_templates.default_template, llama_data, true, new_len, use_jinja) < 0) {
+        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, true, new_len, use_jinja) < 0) {
             return 1;
         }

@@ -963,7 +963,7 @@ static int chat_loop(LlamaData & llama_data, const std::string & user, bool use_
         }

         add_message("assistant", response, llama_data);
-        if (apply_chat_template_with_error_handling(*chat_templates.default_template, llama_data, false, prev_len, use_jinja) < 0) {
+        if (apply_chat_template_with_error_handling(*chat_templates.template_default, llama_data, false, prev_len, use_jinja) < 0) {
             return 1;
         }
     }

examples/server/server.cpp

Lines changed: 13 additions & 13 deletions
@@ -1745,15 +1745,15 @@ struct server_context {
        llama_chat_message chat[] = {{"user", "test"}};

        if (use_jinja) {
-            auto templates = llama_chat_templates_from_model(model, "");
-            GGML_ASSERT(templates.default_template);
+            auto templates = common_chat_templates_from_model(model, "");
+            GGML_ASSERT(templates.template_default);
            try {
-                templates.default_template->apply({{
+                templates.template_default->apply({{
                    {"role", "user"},
                    {"content", "test"},
                }}, json(), true);
-                if (templates.tool_use_template) {
-                    templates.tool_use_template->apply({{
+                if (templates.template_tool_use) {
+                    templates.template_tool_use->apply({{
                        {"role", "user"},
                        {"content", "test"},
                    }}, json(), true);
@@ -3631,8 +3631,8 @@ int main(int argc, char ** argv) {
    auto get_chat_templates = [&ctx_server, &chat_templates_mutex, &chat_templates]() -> const llama_chat_templates & {
        std::lock_guard<std::mutex> lock(chat_templates_mutex);
        if (!chat_templates) {
-            chat_templates = llama_chat_templates_from_model(ctx_server.model, ctx_server.params_base.chat_template);
-            GGML_ASSERT(chat_templates->default_template);
+            chat_templates = common_chat_templates_from_model(ctx_server.model, ctx_server.params_base.chat_template);
+            GGML_ASSERT(chat_templates->template_default);
        }
        return *chat_templates;
    };
@@ -3644,11 +3644,11 @@ int main(int argc, char ** argv) {
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots", ctx_server.params_base.n_parallel },
            { "model_path", ctx_server.params_base.model },
-            { "chat_template", templates.default_template->source() },
+            { "chat_template", templates.template_default->source() },
            { "build_info", build_info },
        };
-        if (ctx_server.params_base.use_jinja && templates.tool_use_template) {
-            data["chat_template_tool_use"] = templates.tool_use_template->source();
+        if (ctx_server.params_base.use_jinja && templates.template_tool_use) {
+            data["chat_template_tool_use"] = templates.template_tool_use->source();
        }

        res_ok(res, data);
@@ -3871,7 +3871,7 @@ int main(int argc, char ** argv) {

        auto body = json::parse(req.body);
        const auto & templates = get_chat_templates();
-        const auto & chat_template = body.contains("tools") && templates.tool_use_template ? *templates.tool_use_template : *templates.default_template;
+        const auto & chat_template = body.contains("tools") && templates.template_tool_use ? *templates.template_tool_use : *templates.template_default;
        json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja);

        return handle_completions_impl(
@@ -4290,8 +4290,8 @@ int main(int argc, char ** argv) {

    // print sample chat example to make it clear which template is used
    LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
-        get_chat_templates().default_template->source().c_str(),
-        common_chat_format_example(*get_chat_templates().default_template, ctx_server.params_base.use_jinja).c_str());
+        get_chat_templates().template_default->source().c_str(),
+        common_chat_format_example(*get_chat_templates().template_default, ctx_server.params_base.use_jinja).c_str());

    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
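The get_chat_templates lambda above builds the templates lazily behind a mutex, so construction happens once on first use and concurrent handlers see the same object. Distilled into a standalone sketch, assuming chat_templates is a std::optional<llama_chat_templates>, as the -> and ! usage suggests:

// Hedged sketch of the server's lazy, thread-safe template initialization.
// `model` and `chat_template_override` stand in for ctx_server state.
std::mutex chat_templates_mutex;
std::optional<llama_chat_templates> chat_templates;

const llama_chat_templates & get_chat_templates() {
    std::lock_guard<std::mutex> lock(chat_templates_mutex);
    if (!chat_templates) {
        chat_templates = common_chat_templates_from_model(model, chat_template_override);
        GGML_ASSERT(chat_templates->template_default);
    }
    return *chat_templates;
}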
