@@ -1283,7 +1283,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.use_color = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
     add_opt(common_arg(
         {"-t", "--threads"}, "N",
         string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
@@ -1416,7 +1416,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     add_opt(common_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         string_format(
-            ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL
+            ex == LLAMA_EXAMPLE_MAIN
                 ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
                 : "number of tokens to predict (default: %d, -1 = infinity)",
             params.n_predict),
@@ -1655,15 +1655,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.input_prefix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--in-suffix"}, "STRING",
         "string to suffix after user inputs with (default: empty)",
         [](common_params & params, const std::string & value) {
             params.input_suffix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--no-warmup"},
         "skip warming up the model with an empty run",
@@ -1680,7 +1680,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.spm_infill = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--samplers"}, "SAMPLERS",
         string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
@@ -2892,7 +2892,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.simple_io = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--positive-file"}, "FNAME",
         string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),
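
For context (not part of the commit): every hunk above edits a common_arg registration inside common_params_parser_init. An option is declared with its flag names, an optional value placeholder, help text, and a handler lambda that mutates common_params; set_examples then scopes the option to specific example programs, and trimming LLAMA_EXAMPLE_INFILL out of those scope lists is all this commit does. An annotated copy of the --in-suffix registration from the third hunk (all identifiers taken from the diff itself):

    add_opt(common_arg(
        {"--in-suffix"}, "STRING",                        // flag name and value placeholder
        "string to suffix after user inputs with (default: empty)",
        [](common_params & params, const std::string & value) {
            params.input_suffix = value;                  // store the raw suffix
            params.enable_chat_template = false;          // manual prefix/suffix bypasses chat templating
        }
    ).set_examples({LLAMA_EXAMPLE_MAIN}));                // after this commit: main only, infill removed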