
Commit bec2314

Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.github/workflows/build.yml
#	CMakeLists.txt
#	Makefile
#	README.md
#	common/CMakeLists.txt
#	docs/backend/SYCL.md
#	docs/build.md
#	docs/docker.md
#	examples/export-lora/export-lora.cpp
#	examples/main/README.md
#	examples/main/main.cpp
#	examples/run/README.md
#	examples/run/run.cpp
#	examples/server/README.md
#	examples/simple-chat/simple-chat.cpp
#	ggml/CMakeLists.txt
#	ggml/src/ggml-hip/CMakeLists.txt
#	src/CMakeLists.txt
#	tests/test-backend-ops.cpp
#	tests/test-chat-template.cpp
2 parents fb1274e + 466ea66


46 files changed: +4307 −580 lines

common/arg.cpp

Lines changed: 48 additions & 11 deletions
@@ -134,7 +134,8 @@ static void common_params_handle_model_default(
         const std::string & model_url,
         std::string & hf_repo,
         std::string & hf_file,
-        const std::string & hf_token) {
+        const std::string & hf_token,
+        const std::string & model_default) {
     if (!hf_repo.empty()) {
         // short-hand to avoid specifying --hf-file -> default it to --model
         if (hf_file.empty()) {
@@ -164,7 +165,7 @@ static void common_params_handle_model_default(
             model = fs_get_cache_file(string_split<std::string>(f, '/').back());
         }
     } else if (model.empty()) {
-        model = DEFAULT_MODEL_PATH;
+        model = model_default;
     }
 }
 
@@ -300,8 +301,9 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
     }
 
     // TODO: refactor model params in a common struct
-    common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file, params.hf_token);
-    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file, params.hf_token);
+    common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file, params.hf_token, DEFAULT_MODEL_PATH);
+    common_params_handle_model_default(params.speculative.model, params.speculative.model_url, params.speculative.hf_repo, params.speculative.hf_file, params.hf_token, "");
+    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file, params.hf_token, "");
 
     if (params.escape) {
         string_process_escapes(params.prompt);
@@ -324,6 +326,14 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both");
     }
 
+    if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) {
+        throw std::runtime_error(string_format(
+            "error: the supplied chat template is not supported: %s%s\n",
+            params.chat_template.c_str(),
+            params.use_jinja ? "" : "\nnote: llama.cpp was started without --jinja, we only support commonly used templates"
+        ));
+    }
+
     return true;
 }
 
@@ -1630,6 +1640,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.hf_repo = value;
         }
     ).set_env("LLAMA_ARG_HF_REPO"));
+    add_opt(common_arg(
+        {"-hfd", "-hfrd", "--hf-repo-draft"}, "<user>/<model>[:quant]",
+        "Same as --hf-repo, but for the draft model (default: unused)",
+        [](common_params & params, const std::string & value) {
+            params.speculative.hf_repo = value;
+        }
+    ).set_env("LLAMA_ARG_HFD_REPO"));
     add_opt(common_arg(
         {"-hff", "--hf-file"}, "FILE",
         "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
@@ -1939,24 +1956,44 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}));
+    add_opt(common_arg(
+        {"--jinja"},
+        "use jinja template for chat (default: disabled)",
+        [](common_params & params) {
+            params.use_jinja = true;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN}).set_env("LLAMA_ARG_JINJA"));
     add_opt(common_arg(
         {"--chat-template"}, "JINJA_TEMPLATE",
         string_format(
             "set custom jinja chat template (default: template taken from model's metadata)\n"
             "if suffix/prefix are specified, template will be disabled\n"
+            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
             "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
         ),
         [](common_params & params, const std::string & value) {
-            if (!common_chat_verify_template(value)) {
-                throw std::runtime_error(string_format(
-                    "error: the supplied chat template is not supported: %s\n"
-                    "note: llama.cpp does not use jinja parser, we only support commonly used templates\n",
-                    value.c_str()
-                ));
-            }
             params.chat_template = value;
         }
     ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+    add_opt(common_arg(
+        {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
+        string_format(
+            "set custom jinja chat template file (default: template taken from model's metadata)\n"
+            "if suffix/prefix are specified, template will be disabled\n"
+            "only commonly used templates are accepted (unless --jinja is set before this flag):\n"
+            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
+        ),
+        [](common_params & params, const std::string & value) {
+            std::ifstream file(value);
+            if (!file) {
+                throw std::runtime_error(string_format("error: failed to open file '%s'\n", value.c_str()));
+            }
+            std::copy(
+                std::istreambuf_iterator<char>(file),
+                std::istreambuf_iterator<char>(),
+                std::back_inserter(params.chat_template));
+        }
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE_FILE"));
     add_opt(common_arg(
         {"-sps", "--slot-prompt-similarity"}, "SIMILARITY",
         string_format("how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity),

common/chat-template.hpp

Lines changed: 268 additions & 0 deletions
@@ -0,0 +1,268 @@
+/*
+    Copyright 2024 Google LLC
+
+    Use of this source code is governed by an MIT-style
+    license that can be found in the LICENSE file or at
+    https://opensource.org/licenses/MIT.
+*/
+// SPDX-License-Identifier: MIT
+#pragma once
+
+#include "minja.hpp"
+#include <json.hpp>
+#include <string>
+#include <vector>
+
+using json = nlohmann::ordered_json;
+
+namespace minja {
+
+class chat_template {
+  public:
+
+  private:
+    bool supports_tools_ = true;
+    // Meta-Llama-3.1-8B-Instruct's template expects arguments to be an object.
+    // Most other templates (and OpenAI's API) expect the arguments object to be stringified.
+    bool requires_object_arguments_ = false;
+    bool requires_typed_content_ = false;
+    bool supports_system_role_ = true;
+    bool supports_parallel_tool_calls_ = false;
+    std::string source_;
+    std::string bos_token_;
+    std::string eos_token_;
+    std::shared_ptr<minja::TemplateNode> template_root_;
+
+    std::string try_raw_render(
+        const nlohmann::ordered_json & messages,
+        const nlohmann::ordered_json & tools,
+        bool add_generation_prompt,
+        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json()) const
+    {
+        try {
+            auto prompt = apply(messages, tools, add_generation_prompt, extra_context, /* adjust_inputs= */ false);
+            // fprintf(stderr, "Prompt: %s\n", prompt.c_str());
+            return prompt;
+        } catch (const std::exception & e) {
+            // fprintf(stderr, "Error: %s\n", e.what());
+            return "";
+        }
+    }
+
+  public:
+    chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token)
+        : source_(source), bos_token_(bos_token), eos_token_(eos_token)
+    {
+        template_root_ = minja::Parser::parse(source_, {
+            /* .trim_blocks = */ true,
+            /* .lstrip_blocks = */ true,
+            /* .keep_trailing_newline = */ false,
+        });
+        supports_tools_ = source.find("tools") != std::string::npos;
+
+        auto renders_string_arguments =
+            try_raw_render({
+                {
+                    {"role", "user"},
+                    {"content", "Hey"}
+                },
+                {
+                    {"role", "assistant"},
+                    {"tool_calls", json::array({
+                        {
+                            {"id", "call_1___"},
+                            {"type", "function"},
+                            {"function", {
+                                {"arguments", "{\"code\": \"print('Hello, World!')\"}"},
+                                {"name", "ipython"},
+                            }},
+                        },
+                    })},
+                }
+            }, {}, false).find("{\"code\": \"print") != std::string::npos;
+        if (!renders_string_arguments) {
+            auto renders_object_arguments =
+                try_raw_render({
+                    {
+                        {"role", "user"},
+                        {"content", "Hey"}
+                    },
+                    {
+                        {"role", "assistant"},
+                        {"tool_calls", json::array({
+                            {
+                                {"id", "call_1___"},
+                                {"type", "function"},
+                                {"function", {
+                                    {"arguments", {
+                                        {"code", "print('Hello, World!')"},
+                                    }},
+                                    {"name", "ipython"},
+                                }},
+                            },
+                        })},
+                    }
+                }, {}, false).find("{\"code\": \"print") != std::string::npos;
+            requires_object_arguments_ = renders_object_arguments;
+        }
+        supports_parallel_tool_calls_ = source.find("tool_call_id") != std::string::npos;
+
+        supports_system_role_ = try_raw_render({
+            {{"role", "system"}, {"content", "<System Needle>"}},
+            {{"role", "user"},   {"content", "Hey"}}
+        }, {}, false).find("<System Needle>") != std::string::npos;
+
+        requires_typed_content_ = try_raw_render({{{"role", "user"}, {"content", "Hey"}}}, {}, false).find("Hey") == std::string::npos
+            && try_raw_render({{{"role", "user"}, {"content", {{{"type", "text"}, {"text", "Hey"}}}}}}, {}, false).find("Hey") != std::string::npos;
+    }
+
+    const std::string & source() const { return source_; }
+    const std::string & bos_token() const { return bos_token_; }
+    const std::string & eos_token() const { return eos_token_; }
+    bool supports_tools() const { return supports_tools_; }
+    bool supports_parallel_tool_calls() const { return supports_parallel_tool_calls_; }
+
+    std::string apply(
+        const nlohmann::ordered_json & messages,
+        const nlohmann::ordered_json & tools,
+        bool add_generation_prompt,
+        const nlohmann::ordered_json & extra_context = nlohmann::ordered_json(),
+        bool adjust_inputs = true) const
+    {
+        json actual_messages;
+
+        // First, "fix" messages so they have a chance to be rendered correctly by the template
+
+        if (adjust_inputs && (requires_object_arguments_ || !supports_system_role_ || !supports_tools_ || requires_typed_content_)) {
+            actual_messages = json::array();
+
+            auto add_message = [&](const json & msg) {
+                if (requires_typed_content_ && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) {
+                    actual_messages.push_back({
+                        {"role", msg.at("role")},
+                        {"content", {{
+                            {"type", "text"},
+                            {"text", msg.at("content")},
+                        }}},
+                    });
+                } else {
+                    actual_messages.push_back(msg);
+                }
+            };
+
+            std::string pending_system;
+            auto flush_sys = [&]() {
+                if (!pending_system.empty()) {
+                    add_message({
+                        {"role", "user"},
+                        {"content", pending_system},
+                    });
+                    pending_system.clear();
+                }
+            };
+            for (const auto & message_ : messages) {
+                auto message = message_;
+                if (!message.contains("role") || !message.contains("content")) {
+                    throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump());
+                }
+                std::string role = message.at("role");
+
+                if (message.contains("tool_calls")) {
+                    if (requires_object_arguments_ || !supports_tools_) {
+                        for (auto & tool_call : message.at("tool_calls")) {
+                            if (tool_call["type"] == "function") {
+                                auto & function = tool_call.at("function");
+                                std::string arguments = function.at("arguments");
+                                function["arguments"] = json::parse(arguments);
+                            }
+                        }
+                    }
+                    if (!supports_tools_) {
+                        auto content = message.at("content");
+                        auto tool_calls = json::array();
+                        for (const auto & tool_call : message.at("tool_calls")) {
+                            if (tool_call.at("type") != "function") {
+                                continue;
+                            }
+                            const auto & function = tool_call.at("function");
+                            auto tc = json {
+                                {"name", function.at("name")},
+                                {"arguments", function.at("arguments")},
+                            };
+                            if (tool_call.contains("id")) {
+                                tc["id"] = tool_call["id"];
+                            }
+                            tool_calls.push_back(tc);
+                        }
+                        auto obj = json {
+                            {"tool_calls", tool_calls},
+                        };
+                        if (!content.is_null() && content != "") {
+                            obj["content"] = content;
+                        }
+                        message["content"] = obj.dump(2);
+                        message.erase("tool_calls");
+                    }
+                }
+                if (!supports_tools_ && role == "tool") {
+                    message["role"] = "user";
+                    auto obj = json {
+                        {"tool_response", {
+                            {"tool", message.at("name")},
+                            {"content", message.at("content")},
+                        }},
+                    };
+                    if (message.contains("tool_call_id")) {
+                        obj["tool_response"]["tool_call_id"] = message.at("tool_call_id");
+                    }
+                    message["content"] = obj.dump(2);
+                    message.erase("name");
+                }
+
+                if (!message["content"].is_null() && !supports_system_role_) {
+                    std::string content = message.at("content");
+                    if (role == "system") {
+                        if (!pending_system.empty()) pending_system += "\n";
+                        pending_system += content;
+                        continue;
+                    } else {
+                        if (role == "user") {
+                            if (!pending_system.empty()) {
+                                message["content"] = pending_system + (content.empty() ? "" : "\n" + content);
+                                pending_system.clear();
+                            }
+                        } else {
+                            flush_sys();
+                        }
+                    }
+                }
+                add_message(message);
+            }
+            flush_sys();
+        } else {
+            actual_messages = messages;
+        }
+
+        auto context = minja::Context::make(json({
+            {"messages", actual_messages},
+            {"add_generation_prompt", add_generation_prompt},
+            {"bos_token", bos_token_},
+            {"eos_token", eos_token_},
+        }));
+
+        if (!tools.is_null()) {
+            auto tools_val = minja::Value(tools);
+            context->set("tools", tools_val);
+        }
+        if (!extra_context.is_null()) {
+            for (auto & kv : extra_context.items()) {
+                minja::Value val(kv.value());
+                context->set(kv.key(), val);
+            }
+        }
+
+        return template_root_->render(context);
+    }
+};
+
+} // namespace minja
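
A minimal usage sketch for the class above, assuming minja.hpp and nlohmann/json are on the include path. The ChatML-style template source and the empty BOS/EOS tokens are illustrative; the constructor and apply() signatures come straight from this header, which also exports the json alias:

#include "chat-template.hpp"
#include <cstdio>
#include <string>

int main() {
    // Illustrative template; in llama.cpp the source normally comes from GGUF
    // metadata, --chat-template, or --chat-template-file.
    const std::string source =
        "{% for message in messages %}"
        "<|im_start|>{{ message.role }}\n{{ message.content }}<|im_end|>\n"
        "{% endfor %}"
        "{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}";

    minja::chat_template tmpl(source, /* bos_token= */ "", /* eos_token= */ "");

    json messages = json::array({
        {{"role", "system"}, {"content", "You are helpful."}},
        {{"role", "user"},   {"content", "Hey"}},
    });

    // No tools, request a generation prompt. adjust_inputs defaults to true, so
    // messages are rewritten to fit whatever the template was detected to
    // support (system role, tool calls, typed content, ...).
    std::string prompt = tmpl.apply(messages, /* tools= */ json(), /* add_generation_prompt= */ true);
    std::printf("%s\n", prompt.c_str());
    return 0;
}

The constructor's try_raw_render probes render small synthetic conversations up front to detect template capabilities (string vs. object tool-call arguments, system-role support, typed content), so apply() can massage real conversations accordingly before rendering.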
