@@ -348,6 +348,18 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
     return true;
 }
 
+static std::string list_builtin_chat_templates() {
+    std::vector<const char *> supported_tmpl;
+    int32_t res = llama_chat_builtin_templates(nullptr, 0);
+    supported_tmpl.resize(res);
+    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
+    std::ostringstream msg;
+    for (auto & tmpl : supported_tmpl) {
+        msg << tmpl << (&tmpl == &supported_tmpl.back() ? "" : ", ");
+    }
+    return msg.str();
+}
+
 common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
     // load dynamic backends
     ggml_backend_load_all();
@@ -1814,9 +1826,11 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--chat-template"}, "JINJA_TEMPLATE",
-        "set custom jinja chat template (default: template taken from model's metadata)\n"
-        "if suffix/prefix are specified, template will be disabled\n"
-        "only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template",
+        string_format(
+            "set custom jinja chat template (default: template taken from model's metadata)\n"
+            "if suffix/prefix are specified, template will be disabled\n"
+            "list of built-in templates:\n%s", list_builtin_chat_templates().c_str()
+        ),
         [](common_params & params, const std::string & value) {
             if (!common_chat_verify_template(value)) {
                 throw std::runtime_error(string_format(
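The two calls to `llama_chat_builtin_templates()` above use the usual count-then-fill pattern: a first call with a null buffer returns the number of built-in templates, and a second call writes the template names into a buffer of exactly that size. A minimal standalone sketch of the same pattern (assuming `llama.h` from this tree; the file name and build line are illustrative, not part of this commit):

```cpp
// print_templates.cpp -- list the built-in chat template names, one per line.
// Build (illustrative): g++ print_templates.cpp -Iinclude -Lbuild -lllama
#include "llama.h"

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    // First call: null buffer, returns how many built-in templates exist.
    const int32_t count = llama_chat_builtin_templates(nullptr, 0);

    // Second call: fills the caller-sized buffer with the template names.
    std::vector<const char *> names(count);
    llama_chat_builtin_templates(names.data(), names.size());

    for (const char * name : names) {
        printf("%s\n", name);
    }
    return 0;
}
```

The second hunk then embeds this list into the `--chat-template` help text via `string_format`, so `--help` prints the accepted built-in template names directly instead of pointing at the wiki page.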