
Commit bbe64fe

feat(granite*): Add granite chat template
Branch: GraniteThreeSupport

This is a port of the work done in llama.cpp with a slight tweak for the
tool call response: ggml-org/llama.cpp#10013

Signed-off-by: Gabe Goodhart <[email protected]>
1 parent 6a13182 commit bbe64fe

File tree: 1 file changed (+13, -0)


llama.cpp/llama.cpp

Lines changed: 13 additions & 0 deletions
@@ -19291,6 +19291,19 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
+    } else if (tmpl == "granite" || tmpl == "granitemoe" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto & message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
     } else {
         // template not supported
         return -1;
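For context, the Granite template renders each turn as
<|start_of_role|>{role}<|end_of_role|>{content}<|end_of_text|> on its own line,
with an extra <|tool_call|> marker for tool-call turns. Below is a minimal
standalone sketch, not part of the commit, showing what this loop produces for
a short chat; the chat_message struct is a hypothetical stand-in for
llama.cpp's llama_chat_message, and add_ass mirrors the function's parameter
for appending an empty assistant turn to prompt generation:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical stand-in for llama.cpp's llama_chat_message.
struct chat_message {
    std::string role;
    std::string content;
};

int main() {
    std::vector<chat_message> chat = {
        {"user",      "What is 1 + 1?"},
        {"assistant", "1 + 1 = 2"},
    };
    bool add_ass = true; // append an empty assistant turn for generation

    std::ostringstream ss;
    for (const auto & message : chat) {
        // Open the turn with the role markers, as in the committed loop.
        ss << "<|start_of_role|>" << message.role << "<|end_of_role|>";
        // Tool-call turns get an extra marker before their content.
        if (message.role == "assistant_tool_call") {
            ss << "<|tool_call|>";
        }
        ss << message.content << "<|end_of_text|>\n";
    }
    if (add_ass) {
        ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    }
    std::cout << ss.str();
    // Prints:
    // <|start_of_role|>user<|end_of_role|>What is 1 + 1?<|end_of_text|>
    // <|start_of_role|>assistant<|end_of_role|>1 + 1 = 2<|end_of_text|>
    // <|start_of_role|>assistant<|end_of_role|>
}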
