Commit f2557e3

feat(granite*): Add granite chat template
Branch: GraniteThreeSupport

This is a port of the work done in llama.cpp, with a slight tweak for the
tool call response: ggml-org/llama.cpp#10013

Signed-off-by: Gabe Goodhart <[email protected]>
1 parent: 070fd01

llama.cpp/llama.cpp

Lines changed: 13 additions & 0 deletions
@@ -19279,6 +19279,19 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
+    } else if (tmpl == "granite" || tmpl == "granitemoe" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto & message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
     } else {
         // template not supported
         return -1;
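
For reference, below is a minimal standalone sketch of what the new branch renders for a short conversation. The chat_message struct and main() driver are illustrative assumptions, not part of this commit or the llama.cpp API; only the template tokens and control flow mirror the diff above.

// sketch.cpp -- illustrative only; struct and driver are assumptions.
// The loop body follows the Granite template logic added in this commit.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct chat_message {   // hypothetical stand-in for the real message type
    std::string role;
    std::string content;
};

int main() {
    std::vector<chat_message> chat = {
        {"system", "You are a helpful assistant."},
        {"user",   "What is 2 + 2?"},
    };
    std::ostringstream ss;
    for (const auto & message : chat) {
        ss << "<|start_of_role|>" << message.role << "<|end_of_role|>";
        if (message.role == "assistant_tool_call") {
            ss << "<|tool_call|>";  // the tool-call tweak noted in the commit message
        }
        ss << message.content << "<|end_of_text|>\n";
    }
    // add_ass == true: open the assistant turn so generation can continue
    ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    std::cout << ss.str();
    return 0;
}

With the assistant turn appended (the add_ass case), this prints:

<|start_of_role|>system<|end_of_role|>You are a helpful assistant.<|end_of_text|>
<|start_of_role|>user<|end_of_role|>What is 2 + 2?<|end_of_text|>
<|start_of_role|>assistant<|end_of_role|>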
