We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 4c9388f · commit e74773e — Copy full SHA for e74773e
src/llama.cpp
@@ -21713,7 +21713,17 @@ static int32_t llama_chat_apply_template_internal(
21713
ss << message->content << "\n\n";
21714
}
21715
21716
- } else {
+ } else if (tmpl == "granite" || tmpl_contains("<|start_of_role|>")) {
21717
+ // IBM Granite template
21718
+ for (const auto& message : chat) {
21719
+ std::string role(message->role);
21720
+ ss << "<|start_of_role|>" << role << "<|end_of_role|>" << "\n"
21721
+ << message->content << "<|end_of_text|>\n";
21722
+ }
21723
+ if (add_ass) {
21724
+ ss << "<|start_of_role|>assistant<|end_of_role|>\n";
21725
21726
+} else {
21727
// template not supported
21728
return -1;
21729
0 commit comments