@@ -1824,9 +1824,6 @@ std::string common_chat_format_example(const llama_chat_template & tmpl, bool us
 llama_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
 {
     auto vocab = llama_model_get_vocab(model);
-    // TODO: consider detecting if the template needs bos / eos tokens and warn / error when missing.
-    auto token_bos = llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(vocab, llama_vocab_bos(vocab), true);
-    auto token_eos = llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(vocab, llama_vocab_eos(vocab), true);
     std::string default_template_src = chat_template_override;
     std::string template_tool_use_src = chat_template_override;
     bool has_explicit_template = !chat_template_override.empty();
@@ -1856,6 +1853,19 @@ llama_chat_templates common_chat_templates_from_model(const struct llama_model *
18561853 )" ;
18571854 }
18581855 }
1856+ const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
1857+ if (token == LLAMA_TOKEN_NULL) {
1858+ if (default_template_src.find (jinja_variable_name) != std::string::npos
1859+ || template_tool_use_src.find (jinja_variable_name) != std::string::npos) {
1860+ LOG_WRN (" %s: warning: vocab does not have a %s token, jinja template won't work as intended.\n " , __func__, name);
1861+ }
1862+ return std::string ();
1863+ } else {
1864+ return common_token_to_piece (vocab, token, true );
1865+ }
1866+ };
1867+ auto token_bos = get_token (llama_vocab_bos (vocab), " BOS" , " bos_token" );
1868+ auto token_eos = get_token (llama_vocab_eos (vocab), " EOS" , " eos_token" );
18591869 return {
18601870 has_explicit_template,
18611871 std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),
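
For illustration, here is a minimal self-contained sketch of the behavior the new get_token lambda introduces. Everything in it is mocked for the example (llama_token, LLAMA_TOKEN_NULL, and the hypothetical get_token_piece helper stand in for the real llama.cpp types and calls); only the warn-and-fall-back logic mirrors the diff:

#include <cstdio>
#include <string>

// Mocked stand-ins for the llama.cpp types used in the diff (illustration only).
using llama_token = int;
constexpr llama_token LLAMA_TOKEN_NULL = -1;

// Same logic as the get_token lambda above: if the vocab lacks the token but
// the template source references the corresponding jinja variable, warn and
// substitute an empty string; otherwise pass the token's text piece through.
static std::string get_token_piece(llama_token token, const std::string & piece,
                                   const char * name, const char * jinja_variable_name,
                                   const std::string & template_src) {
    if (token == LLAMA_TOKEN_NULL) {
        if (template_src.find(jinja_variable_name) != std::string::npos) {
            fprintf(stderr, "warning: vocab does not have a %s token, jinja template won't work as intended.\n", name);
        }
        return std::string();
    }
    return piece;
}

int main() {
    const std::string tmpl = "{{ bos_token }}{% for m in messages %}...{% endfor %}";
    // No BOS token in the vocab, and the template references bos_token -> warns.
    auto bos = get_token_piece(LLAMA_TOKEN_NULL, "", "BOS", "bos_token", tmpl);
    // An EOS token exists -> its piece is used silently.
    auto eos = get_token_piece(2, "</s>", "EOS", "eos_token", tmpl);
    printf("bos='%s' eos='%s'\n", bos.c_str(), eos.c_str());
    return 0;
}

Note that the warning only fires when a template actually references the missing variable, so vocabs without a BOS or EOS token stay silent for templates that never use them.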