@@ -84,20 +84,20 @@ def cli():
8484 "The type of backend to use to run requests against. Defaults to 'openai_http'."
8585 f" Supported types: { ', ' .join (get_args (BackendType ))} "
8686 ),
87- default = GenerativeTextScenario .backend_type ,
87+ default = GenerativeTextScenario .model_fields [ " backend_type" ]. default ,
8888)
8989@click .option (
9090 "--backend-args" ,
9191 callback = parse_json ,
92- default = GenerativeTextScenario .backend_args ,
92+ default = GenerativeTextScenario .model_fields [ " backend_args" ]. default ,
9393 help = (
9494 "A JSON string containing any arguments to pass to the backend as a "
9595 "dict with **kwargs."
9696 ),
9797)
9898@click .option (
9999 "--model" ,
100- default = GenerativeTextScenario .model ,
100+ default = GenerativeTextScenario .model_fields [ " model" ]. default ,
101101 type = str ,
102102 help = (
103103 "The ID of the model to benchmark within the backend. "
@@ -106,7 +106,7 @@ def cli():
 )
 @click.option(
     "--processor",
-    default=GenerativeTextScenario.processor,
+    default=GenerativeTextScenario.model_fields["processor"].default,
     type=str,
     help=(
         "The processor or tokenizer to use to calculate token counts for statistics "
@@ -116,7 +116,7 @@ def cli():
 )
 @click.option(
     "--processor-args",
-    default=GenerativeTextScenario.processor_args,
+    default=GenerativeTextScenario.model_fields["processor_args"].default,
     callback=parse_json,
     help=(
         "A JSON string containing any arguments to pass to the processor constructor "
@@ -135,7 +135,7 @@ def cli():
 )
 @click.option(
     "--data-args",
-    default=GenerativeTextScenario.data_args,
+    default=GenerativeTextScenario.model_fields["data_args"].default,
     callback=parse_json,
     help=(
         "A JSON string containing any arguments to pass to the dataset creation "
@@ -144,7 +144,7 @@ def cli():
 )
 @click.option(
     "--data-sampler",
-    default=GenerativeTextScenario.data_sampler,
+    default=GenerativeTextScenario.model_fields["data_sampler"].default,
     type=click.Choice(["random"]),
     help=(
         "The data sampler type to use. 'random' will add a random shuffle on the data. "
@@ -162,7 +162,7 @@ def cli():
 )
 @click.option(
     "--rate",
-    default=GenerativeTextScenario.rate,
+    default=GenerativeTextScenario.model_fields["rate"].default,
     callback=parse_number_str,
     help=(
         "The rates to run the benchmark at. "
@@ -176,7 +176,7 @@ def cli():
 @click.option(
     "--max-seconds",
     type=float,
-    default=GenerativeTextScenario.max_seconds,
+    default=GenerativeTextScenario.model_fields["max_seconds"].default,
     help=(
         "The maximum number of seconds each benchmark can run for. "
         "If None, will run until max_requests or the data is exhausted."
@@ -185,7 +185,7 @@ def cli():
 @click.option(
     "--max-requests",
     type=int,
-    default=GenerativeTextScenario.max_requests,
+    default=GenerativeTextScenario.model_fields["max_requests"].default,
     help=(
         "The maximum number of requests each benchmark can run for. "
         "If None, will run until max_seconds or the data is exhausted."
@@ -194,7 +194,7 @@ def cli():
 @click.option(
     "--warmup-percent",
     type=float,
-    default=GenerativeTextScenario.warmup_percent,
+    default=GenerativeTextScenario.model_fields["warmup_percent"].default,
     help=(
         "The percent of the benchmark (based on max-seconds, max-requets, "
         "or lenth of dataset) to run as a warmup and not include in the final results. "
@@ -204,7 +204,7 @@ def cli():
 @click.option(
     "--cooldown-percent",
     type=float,
-    default=GenerativeTextScenario.cooldown_percent,
+    default=GenerativeTextScenario.model_fields["cooldown_percent"].default,
     help=(
         "The percent of the benchmark (based on max-seconds, max-requets, or lenth "
         "of dataset) to run as a cooldown and not include in the final results. "
@@ -214,19 +214,19 @@ def cli():
 @click.option(
     "--disable-progress",
     is_flag=True,
-    default=not GenerativeTextScenario.show_progress,
+    default=not GenerativeTextScenario.model_fields["show_progress"].default,
     help="Set this flag to disable progress updates to the console",
 )
 @click.option(
     "--display-scheduler-stats",
     is_flag=True,
-    default=GenerativeTextScenario.show_progress_scheduler_stats,
+    default=GenerativeTextScenario.model_fields["show_progress_scheduler_stats"].default,
     help="Set this flag to display stats for the processes running the benchmarks",
 )
 @click.option(
     "--disable-console-outputs",
     is_flag=True,
-    default=not GenerativeTextScenario.output_console,
+    default=not GenerativeTextScenario.model_fields["output_console"].default,
     help="Set this flag to disable console output",
 )
 @click.option(
@@ -243,7 +243,7 @@ def cli():
 @click.option(
     "--output-extras",
     callback=parse_json,
-    default=GenerativeTextScenario.output_extras,
+    default=GenerativeTextScenario.model_fields["output_extras"].default,
     help="A JSON string of extra data to save with the output benchmarks",
 )
 @click.option(
@@ -253,11 +253,11 @@ def cli():
253253 "The number of samples to save in the output file. "
254254 "If None (default), will save all samples."
255255 ),
256- default = GenerativeTextScenario .output_sampling ,
256+ default = GenerativeTextScenario .model_fields [ " output_sampling" ]. default ,
257257)
258258@click .option (
259259 "--random-seed" ,
260- default = GenerativeTextScenario .random_seed ,
260+ default = GenerativeTextScenario .model_fields [ " random_seed" ]. default ,
261261 type = int ,
262262 help = "The random seed to use for benchmarking to ensure reproducibility." ,
263263)
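
The pattern introduced throughout this diff reads each Click default from the Pydantic v2 `model_fields` mapping, where every field is represented by a `FieldInfo` object whose `.default` carries the declared default value. Below is a minimal, self-contained sketch of that idea; `ExampleScenario` and the `bench` command are hypothetical stand-ins, not the actual `GenerativeTextScenario` class from this repository.

```python
from typing import Optional

import click
from pydantic import BaseModel


class ExampleScenario(BaseModel):
    # Illustrative fields only; names mirror the diff but are not the real model.
    backend_type: str = "openai_http"
    max_seconds: Optional[float] = None


# model_fields maps field names to FieldInfo objects; .default is the declared default.
assert ExampleScenario.model_fields["backend_type"].default == "openai_http"


@click.command()
@click.option(
    "--backend-type",
    default=ExampleScenario.model_fields["backend_type"].default,
    show_default=True,
    help="Backend type; falls back to the scenario model's declared default.",
)
def bench(backend_type: str) -> None:
    click.echo(f"benchmarking with backend_type={backend_type}")
```

This keeps the CLI defaults and the scenario model's defaults defined in one place, so changing the model is enough to change what `--help` reports and what the command uses when a flag is omitted.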