Skip to content

Commit 11c1f0c

Browse files
author
Olivier Chafik
committed
actually we want eos_token in the template to infer tool call examples, explicitly skipped in new template options
1 parent bc6d910 commit 11c1f0c

File tree

1 file changed

+2
-7
lines changed

1 file changed

+2
-7
lines changed

common/common.cpp

Lines changed: 2 additions & 7 deletions
Original file line number | Diff line number | Diff line change

@@ -1904,10 +1904,6 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             default_template_src = CHATML_TEMPLATE_SRC;
         }
     }
-    std::string token_bos;
-    std::string token_eos;
-    // TODO: update logic that adds BOS and EOS tokens to the tokenized prompt, in favour of the template.
-#if 0
     auto vocab = llama_model_get_vocab(model);
     const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
         if (token == LLAMA_TOKEN_NULL) {
@@ -1920,9 +1916,8 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
             return common_token_to_piece(vocab, token, true);
         }
     };
-    token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
-    token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
-#endif
+    auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
+    auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
     try {
         return {
             has_explicit_template,

0 commit comments

Comments (0)