@@ -22,6 +22,11 @@ common_arg & common_arg::set_examples(std::initializer_list<enum llama_example>
     return *this;
 }
 
+common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
+    this->excludes = std::move(excludes);
+    return *this;
+}
+
 common_arg & common_arg::set_env(const char * env) {
     help = help + "\n(env: " + env + ")";
     this->env = env;
@@ -37,6 +42,10 @@ bool common_arg::in_example(enum llama_example ex) {
     return examples.find(ex) != examples.end();
 }
 
+bool common_arg::is_exclude(enum llama_example ex) {
+    return excludes.find(ex) != excludes.end();
+}
+
 bool common_arg::get_value_from_env(std::string & output) {
     if (env == nullptr) return false;
     char * value = std::getenv(env);
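
The two helpers mirror the existing `set_examples`/`in_example` pair. For context, a minimal sketch of the header-side counterpart they assume; the diff only touches common/arg.cpp, so the exact declaration in common/arg.h is an assumption, with the member name and `std::set` type inferred from the existing `examples` member:

```cpp
// Sketch (assumed, not shown in this diff): common_arg grows an `excludes`
// set alongside the existing `examples` set in common/arg.h.
struct common_arg {
    std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
    std::set<enum llama_example> excludes = {}; // examples this arg is hidden from

    common_arg & set_examples(std::initializer_list<enum llama_example> examples);
    common_arg & set_excludes(std::initializer_list<enum llama_example> excludes);
    bool in_example(enum llama_example ex);
    bool is_exclude(enum llama_example ex);
    // ... remaining members unchanged ...
};
```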
@@ -420,7 +429,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
      * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
      */
     auto add_opt = [&](common_arg arg) {
-        if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) {
+        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
             ctx_arg.options.push_back(std::move(arg));
         }
     };
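
With this change, `add_opt` keeps an option only if it is registered for the current example (or for `LLAMA_EXAMPLE_COMMON`) and not explicitly excluded from it. A toy illustration of the predicate, assuming an option built like the `--prompt` registration further down:

```cpp
// Illustration only: predicate behavior for an option registered on
// LLAMA_EXAMPLE_COMMON with set_excludes({LLAMA_EXAMPLE_SERVER}).
common_arg opt = common_arg(
        {"-p", "--prompt"}, "PROMPT", "prompt to start generation with",
        [](common_params & params, const std::string & value) { params.prompt = value; }
    ).set_excludes({LLAMA_EXAMPLE_SERVER});

// in_example(LLAMA_EXAMPLE_COMMON) is true in both cases, but:
opt.is_exclude(LLAMA_EXAMPLE_MAIN);   // false -> option is kept for llama-cli
opt.is_exclude(LLAMA_EXAMPLE_SERVER); // true  -> option is dropped for llama-server
```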
@@ -649,7 +658,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.prompt = value;
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--no-perf"},
         string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
@@ -673,7 +682,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.prompt.pop_back();
             }
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--in-file"}, "FNAME",
         "an input file (repeat to specify multiple files)",
@@ -700,7 +709,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.prompt = ss.str();
             fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-e", "--escape"},
         string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
@@ -1512,15 +1521,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
15121521 {" --lora" }, " FNAME" ,
15131522 " path to LoRA adapter (can be repeated to use multiple adapters)" ,
15141523 [](common_params & params, const std::string & value) {
1515- params.lora_adapters .push_back ({ std::string (value), 1.0 });
1524+ params.lora_adapters .push_back ({ std::string (value), 1.0 , nullptr });
15161525 }
15171526 // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
15181527 ).set_examples ({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
15191528 add_opt (common_arg (
15201529 {" --lora-scaled" }, " FNAME" , " SCALE" ,
15211530 " path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)" ,
15221531 [](common_params & params, const std::string & fname, const std::string & scale) {
1523- params.lora_adapters .push_back ({ fname, std::stof (scale) });
1532+ params.lora_adapters .push_back ({ fname, std::stof (scale), nullptr });
15241533 }
15251534 // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
15261535 ).set_examples ({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
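
The trailing `nullptr` implies the aggregate being pushed gained a third field, presumably a handle to the loaded adapter that is filled in after model load. A hedged sketch of the assumed struct shape; the actual definition lives in common/common.h and is not part of this diff:

```cpp
// Assumed shape (not shown in this diff): the lora adapter info now carries
// a pointer that stays nullptr until the adapter is actually loaded.
struct common_lora_adapter_info {
    std::string path;
    float scale;
    struct llama_lora_adapter * ptr; // nullptr at parse time; set on load
};
```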