Skip to content

Commit d5fa351

Browse files
author
ochafik
committed
Revert LLAMA_CHATML_TEMPLATE refactor
1 parent 81c0d43 commit d5fa351

File tree

3 files changed

+8
-15
lines changed

3 files changed

+8
-15
lines changed

common/common.cpp

Lines changed: 8 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -74,15 +74,6 @@
7474
#endif
7575
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
7676

77-
const std::string LLAMA_CHATML_TEMPLATE(R"(
78-
{%- for message in messages -%}
79-
{{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
80-
{%- endfor -%}
81-
{%- if add_generation_prompt -%}
82-
{{- "<|im_start|>assistant\n" -}}
83-
{%- endif -%}
84-
)");
85-
8677
//
8778
// CURL utils
8879
//
@@ -1846,7 +1837,14 @@ llama_chat_templates llama_chat_templates_from_model(const struct llama_model *
18461837
if (!tool_use_template_src.empty()) {
18471838
default_template_src = tool_use_template_src;
18481839
} else {
1849-
default_template_src = LLAMA_CHATML_TEMPLATE;
1840+
default_template_src = R"(
1841+
{%- for message in messages -%}
1842+
{{- "<|im_start|>" + message.role + "\n" + message.content + "<|im_end|>\n" -}}
1843+
{%- endfor -%}
1844+
{%- if add_generation_prompt -%}
1845+
{{- "<|im_start|>assistant\n" -}}
1846+
{%- endif -%}
1847+
)";
18501848
}
18511849
}
18521850
return {

common/common.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -26,8 +26,6 @@
2626

2727
#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
2828

29-
extern const std::string LLAMA_CHATML_TEMPLATE;
30-
3129
struct common_adapter_lora_info {
3230
std::string path;
3331
float scale;

tests/test-chat-template.cpp

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,6 @@
88
#include "llama.h"
99
#include "common.h"
1010
#include "chat-template.hpp"
11-
#include "llama-chat.h"
1211

1312
int main(void) {
1413
std::vector<llama_chat_message> conversation {
@@ -365,7 +364,5 @@ int main(void) {
365364
assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
366365
assert(fmt_single("gigachat") == "user<|role_sep|>How are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>");
367366

368-
assert(llm_chat_detect_template(LLAMA_CHATML_TEMPLATE) == LLM_CHAT_TEMPLATE_CHATML);
369-
370367
return 0;
371368
}

0 commit comments

Comments (0)