
Commit de6f155

llama : better legacy chat template for rwkv
Signed-off-by: Molly Sophia <[email protected]>
1 parent 0b8c076

1 file changed: +11 −6 lines

src/llama-chat.cpp

Lines changed: 11 additions & 6 deletions
@@ -528,12 +528,17 @@ int32_t llm_chat_apply_template(
         }
     } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
         // this template requires the model to have "\n\n" as EOT token
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "user") {
-                ss << "User: " << message->content << "\n\nAssistant:";
-            } else {
-                ss << message->content << "\n\n";
+        for (size_t i = 0; i < chat.size(); i++) {
+            std::string role(chat[i]->role);
+            if (role == "system") {
+                ss << trim(chat[i]->content) << "\n\n";
+            } else if (role == "user") {
+                ss << "User: " << trim(chat[i]->content) << "\n\n";
+                if (i == chat.size() - 1) {
+                    ss << "Assistant:";
+                }
+            } else if (role == "assistant") {
+                ss << "Assistant: " << trim(chat[i]->content) << "\n\n";
             }
         }
     } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
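Reading the diff: the old loop appended "\n\nAssistant:" after every user turn and wrote all other roles with no prefix at all, while the new loop trims each message, handles system and assistant roles explicitly, and appends the bare "Assistant:" generation prompt only when the final message is a user turn. A minimal standalone sketch of the new rendering follows; the chat_message struct and trim() helper here are simplified stand-ins for llama.cpp's llama_chat_message and the internal trim() used in src/llama-chat.cpp, not the actual types.

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for llama_chat_message (which holds C strings).
struct chat_message { std::string role; std::string content; };

// Stand-in for the trim() helper in src/llama-chat.cpp.
static std::string trim(const std::string & s) {
    size_t b = s.find_first_not_of(" \t\n");
    size_t e = s.find_last_not_of(" \t\n");
    return b == std::string::npos ? "" : s.substr(b, e - b + 1);
}

int main() {
    std::vector<chat_message> chat = {
        { "system",    "You are a helpful assistant." },
        { "user",      "Hello!"                       },
        { "assistant", "Hi, how can I help?"          },
        { "user",      "What is RWKV?"                },
    };

    std::string ss;
    for (size_t i = 0; i < chat.size(); i++) {
        const std::string & role = chat[i].role;
        if (role == "system") {
            ss += trim(chat[i].content) + "\n\n";
        } else if (role == "user") {
            ss += "User: " + trim(chat[i].content) + "\n\n";
            // the generation prompt is appended only after the final user turn
            if (i == chat.size() - 1) {
                ss += "Assistant:";
            }
        } else if (role == "assistant") {
            ss += "Assistant: " + trim(chat[i].content) + "\n\n";
        }
    }
    printf("%s\n", ss.c_str());
}

Run as-is, this prints the rendered prompt, with "\n\n" separating turns (the sequence this template relies on as the EOT token):

You are a helpful assistant.

User: Hello!

Assistant: Hi, how can I help?

User: What is RWKV?

Assistant: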
