@@ -24,11 +24,11 @@
 from guidellm.benchmark.progress import BenchmarkerProgressGroup
 from guidellm.benchmark.scenario import enable_scenarios
 from guidellm.benchmark.types import (
-    OutputFormatType,
-    DataInputType,
-    ProcessorInputType,
-    ProgressInputType,
-    AggregatorInputType
+    AggregatorInputT,
+    DataInputT,
+    OutputFormatT,
+    ProcessorInputT,
+    ProgressInputT,
 )
 from guidellm.request import GenerativeRequestLoader
 from guidellm.scheduler import (
@@ -49,16 +49,15 @@
 
 # Helper functions
 
+
 async def initialize_backend(
     backend: BackendType | Backend,
     target: str,
     model: str | None,
     backend_kwargs: dict[str, Any] | None,
 ) -> Backend:
     backend = (
-        Backend.create(
-            backend, target=target, model=model, **(backend_kwargs or {})
-        )
+        Backend.create(backend, target=target, model=model, **(backend_kwargs or {}))
         if not isinstance(backend, Backend)
         else backend
     )
@@ -95,18 +94,19 @@ async def resolve_profile(
     )
     return profile
 
+
 async def resolve_output_formats(
-    output_formats: OutputFormatType,
+    output_formats: OutputFormatT,
     output_path: str | Path | None,
 ) -> dict[str, GenerativeBenchmarkerOutput]:
-    output_formats = GenerativeBenchmarkerOutput.resolve(
+    return GenerativeBenchmarkerOutput.resolve(
         output_formats=(output_formats or {}), output_path=output_path
     )
-    return output_formats
+
 
 async def finalize_outputs(
     report: GenerativeBenchmarksReport,
-    resolved_output_formats: dict[str, GenerativeBenchmarkerOutput]
+    resolved_output_formats: dict[str, GenerativeBenchmarkerOutput],
 ):
     output_format_results = {}
     for key, output in resolved_output_formats.items():
@@ -122,7 +122,7 @@ async def finalize_outputs(
 @enable_scenarios
 async def benchmark_generative_text(  # noqa: C901
     target: str,
-    data: DataInputType,
+    data: DataInputT,
     profile: StrategyType | ProfileType | Profile,
     rate: list[float] | None = None,
     random_seed: int = 42,
@@ -131,18 +131,18 @@ async def benchmark_generative_text( # noqa: C901
     backend_kwargs: dict[str, Any] | None = None,
     model: str | None = None,
     # Data configuration
-    processor: ProcessorInputType | None = None,
+    processor: ProcessorInputT | None = None,
     processor_args: dict[str, Any] | None = None,
     data_args: dict[str, Any] | None = None,
     data_sampler: Literal["random"] | None = None,
     # Output configuration
     output_path: str | Path | None = _CURRENT_WORKING_DIR,
-    output_formats: OutputFormatType = ("console", "json", "html", "csv"),
+    output_formats: OutputFormatT = ("console", "json", "html", "csv"),
     # Updates configuration
-    progress: ProgressInputType | None = None,
+    progress: ProgressInputT | None = None,
     print_updates: bool = False,
     # Aggregators configuration
-    add_aggregators: AggregatorInputType | None = None,
+    add_aggregators: AggregatorInputT | None = None,
     warmup: float | None = None,
     cooldown: float | None = None,
     request_samples: int | None = 20,
@@ -259,7 +259,9 @@ async def benchmark_generative_text( # noqa: C901
         )
 
     with console.print_update_step(title="Resolving output formats") as console_step:
-        resolved_output_formats = await resolve_output_formats(output_formats, output_path)
+        resolved_output_formats = await resolve_output_formats(
+            output_formats, output_path
+        )
         console_step.finish(
             title="Output formats resolved",
             details={key: str(val) for key, val in resolved_output_formats.items()},
@@ -314,7 +316,7 @@ async def benchmark_generative_text( # noqa: C901
 async def reimport_benchmarks_report(
     file: Path,
     output_path: Path | None,
-    output_formats: OutputFormatType = ("console", "json", "html", "csv"),
+    output_formats: OutputFormatT = ("console", "json", "html", "csv"),
 ) -> tuple[GenerativeBenchmarksReport, dict[str, Any]]:
     """
     The command-line entry point for re-importing and displaying an
@@ -326,10 +328,15 @@ async def reimport_benchmarks_report(
         title=f"Loading benchmarks from {file}"
     ) as console_step:
         report = GenerativeBenchmarksReport.load_file(file)
-        console_step.finish(f"Import of old benchmarks complete; loaded {len(report.benchmarks)} benchmark(s)")
+        console_step.finish(
+            "Import of old benchmarks complete;"
+            f" loaded {len(report.benchmarks)} benchmark(s)"
+        )
 
     with console.print_update_step(title="Resolving output formats") as console_step:
-        resolved_output_formats = await resolve_output_formats(output_formats, output_path)
+        resolved_output_formats = await resolve_output_formats(
+            output_formats, output_path
+        )
         console_step.finish(
             title="Output formats resolved",
             details={key: str(val) for key, val in resolved_output_formats.items()},
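
For context while reviewing, here is a minimal usage sketch of the re-import entry point as it reads after this diff. It relies only on the signature shown above (`file: Path`, `output_path: Path | None`, `output_formats: OutputFormatT`, returning a report plus a results dict); the import path, the `benchmarks.json` filename, and the chosen formats are illustrative assumptions, not part of this change.

```python
# Illustrative sketch only. Assumes the signature of reimport_benchmarks_report
# shown in this diff; the module path and input file name are hypothetical.
import asyncio
from pathlib import Path

from guidellm.benchmark.entrypoints import reimport_benchmarks_report  # path assumed


async def main() -> None:
    # Re-load a previously saved report and render it to the console only.
    report, output_results = await reimport_benchmarks_report(
        file=Path("benchmarks.json"),  # hypothetical existing report file
        output_path=None,
        output_formats=("console",),
    )
    print(f"Loaded {len(report.benchmarks)} benchmark(s)")


if __name__ == "__main__":
    asyncio.run(main())
```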