
Commit f9ab15a

feat(granite*): Add granite chat template

Branch: GraniteThreeSupport

This is a port of the work done in llama.cpp, with a slight tweak for the tool call response: ggml-org/llama.cpp#10013

Signed-off-by: Gabe Goodhart <[email protected]>

1 parent: fc90175

File tree: 1 file changed (+13 −0 lines)

llama.cpp/llama.cpp (13 additions, 0 deletions)
@@ -19294,6 +19294,19 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
+    } else if (tmpl == "granite" || tmpl == "granitemoe" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto & message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
     } else {
         // template not supported
         return -1;
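
For context, here is a minimal, self-contained sketch of what the Granite branch above renders for a short conversation. The chat_message struct and the main driver are illustrative stand-ins (in llama.cpp the real entry point is llama_chat_apply_template, which calls the internal function in the hunk), but the formatting logic mirrors the added lines:

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative stand-in for llama.cpp's llama_chat_message; not part of this commit.
struct chat_message {
    std::string role;
    std::string content;
};

int main() {
    const std::vector<chat_message> chat = {
        {"system", "You are a helpful assistant."},
        {"user",   "What is 2 + 2?"},
    };
    const bool add_ass = true; // append the assistant generation prompt

    std::ostringstream ss;
    for (const auto & message : chat) {
        // Each turn is wrapped in role markers and terminated with <|end_of_text|>.
        ss << "<|start_of_role|>" << message.role << "<|end_of_role|>";
        if (message.role == "assistant_tool_call") {
            // Tool call responses get an extra <|tool_call|> marker (the tweak
            // mentioned in the commit message).
            ss << "<|tool_call|>";
        }
        ss << message.content << "<|end_of_text|>\n";
    }
    if (add_ass) {
        ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    }
    std::cout << ss.str();
}
```

Running this prints the prompt shape Granite models expect:

```
<|start_of_role|>system<|end_of_role|>You are a helpful assistant.<|end_of_text|>
<|start_of_role|>user<|end_of_role|>What is 2 + 2?<|end_of_text|>
<|start_of_role|>assistant<|end_of_role|>
```

Per the condition in the diff, callers reach this branch either by naming the template "granite" or "granitemoe" explicitly, or when the model's embedded chat template contains the "<|start_of_role|>" marker.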
