1 1 use clap::error::ErrorKind::InvalidValue;
2- use clap::{Error, Parser};
2+ use clap::{ArgGroup, Error, Parser};
3 3 use inference_benchmarker::{run, RunConfiguration, TokenizeOptions};
4 4 use log::{debug, error};
5 5 use reqwest::Url;
@@ -8,7 +8,8 @@ use std::time::Duration;
8 8 use tokio::sync::broadcast;
9 9
10 10 #[derive(Parser, Debug)]
11- #[clap(author, version, about, long_about = None)]
11+ #[clap(author, version, about, long_about = None, group(ArgGroup::new("group_profile").multiple(true)), group(ArgGroup::new("group_manual").multiple(true).conflicts_with("group_profile"))
12+ )]
12 13 struct Args {
13 14     /// The name of the tokenizer to use
14 15     #[clap(short, long, env)]
@@ -19,24 +20,10 @@ struct Args {
19 20     model_name: Option<String>,
20 21
21 22     /// The maximum number of virtual users to use
22-     #[clap(
23-         default_value = "128",
24-         short,
25-         long,
26-         env,
27-         group = "group_manual",
28-         conflicts_with = "group_profile"
29-     )]
23+     #[clap(default_value = "128", short, long, env, group = "group_manual")]
30 24     max_vus: u64,
31 25     /// The duration of each benchmark step
32-     #[clap(
33-         default_value = "120s",
34-         short,
35-         long,
36-         env,
37-         group = "group_manual",
38-         conflicts_with = "group_profile"
39-     )]
26+     #[clap(default_value = "120s", short, long, env, group = "group_manual")]
40 27     #[arg(value_parser = parse_duration)]
41 28     duration: Duration,
42 29     /// A list of rates of requests to send per second (only valid for the ConstantArrivalRate benchmark).
@@ -47,27 +34,13 @@ struct Args {
47 34     #[clap(default_value = "10", long, env)]
48 35     num_rates: u64,
49 36     /// A benchmark profile to use
50-     #[clap(long, env, group = "group_profile", conflicts_with = "group_manual")]
37+     #[clap(long, env, group = "group_profile")]
51 38     profile: Option<String>,
52 39     /// The kind of benchmark to run (throughput, sweep, optimum)
53-     #[clap(
54-         default_value = "sweep",
55-         short,
56-         long,
57-         env,
58-         group = "group_manual",
59-         conflicts_with = "group_profile"
60-     )]
40+     #[clap(default_value = "sweep", short, long, env, group = "group_manual")]
61 41     benchmark_kind: String,
62 42     /// The duration of the prewarm step ran before the benchmark to warm up the backend (JIT, caches, etc.)
63-     #[clap(
64-         default_value = "30s",
65-         short,
66-         long,
67-         env,
68-         group = "group_manual",
69-         conflicts_with = "group_profile"
70-     )]
43+     #[clap(default_value = "30s", short, long, env, group = "group_manual")]
71 44     #[arg(value_parser = parse_duration)]
72 45     warmup: Duration,
73 46     /// The URL of the backend to benchmark. Must be compatible with OpenAI Message API
@@ -91,8 +64,7 @@ struct Args {
91 64         long,
92 65         env,
93 66         value_parser(parse_tokenizer_options),
94-         group = "group_manual",
95-         conflicts_with = "group_profile"
67+         group = "group_manual"
96 68     )]
97 69     prompt_options: Option<TokenizeOptions>,
98 70     /// Constraints for the generated text.
@@ -108,26 +80,23 @@ struct Args {
108 80         long,
109 81         env,
110 82         value_parser(parse_tokenizer_options),
111-         group = "group_manual",
112-         conflicts_with = "group_profile"
83+         group = "group_manual"
113 84     )]
114 85     decode_options: Option<TokenizeOptions>,
115 86     /// Hugging Face dataset to use for prompt generation
116 87     #[clap(
117 88         default_value = "hlarcher/share_gpt_small",
118 89         long,
119 90         env,
120-         group = "group_manual",
121-         conflicts_with = "group_profile"
91+         group = "group_manual"
122 92     )]
123 93     dataset: String,
124 94     /// File to use in the Dataset
125 95     #[clap(
126 96         default_value = "share_gpt_filtered_small.json",
127 97         long,
128 98         env,
129-         group = "group_manual",
130-         conflicts_with = "group_profile"
99+         group = "group_manual"
131 100     )]
132 101     dataset_file: String,
133 102     /// Extra metadata to include in the benchmark results file, comma-separated key-value pairs.
0 commit comments