Skip to content

Commit 7158ab5

Browse files
committed
Fix from-file
1 parent c72c661 commit 7158ab5

File tree

2 files changed

+105
-119
lines changed

2 files changed

+105
-119
lines changed

src/guidellm/__main__.py

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -473,31 +473,38 @@ def run(
473473
)
474474
@click.option(
475475
"--output-path",
476-
type=click.Path(file_okay=True, dir_okay=True, exists=False),
477-
default=None,
478-
is_flag=False,
479-
flag_value=Path.cwd() / "benchmarks_reexported.json",
476+
type=click.Path(),
477+
default=Path.cwd(),
478+
help=(
479+
"Allows re-exporting the benchmarks to other formats. "
480+
"The path to save the output formats to, if the format is a file type. "
481+
"If it is a directory, it will save all output formats selected under it. "
482+
"If it is a file, it will save the corresponding output format to that file. "
483+
"Any selected output formats that do not match the file extension will "
484+
"be saved in the parent directory of the file path. "
485+
"Defaults to the current working directory. "
486+
),
487+
)
488+
@click.option(
489+
"--output-formats",
490+
multiple=True,
491+
type=str,
492+
default=("console", "json"), # ("console", "json", "html", "csv")
480493
help=(
481-
"Allows re-exporting the benchmarks to another format. "
482-
"The path to save the output to. If it is a directory, "
483-
"it will save benchmarks.json under it. "
484-
"Otherwise, json, yaml, or csv files are supported for output types "
485-
"which will be read from the extension for the file path. "
486-
"This input is optional. If the output path flag is not provided, "
487-
"the benchmarks will not be reexported. If the flag is present but "
488-
"no value is specified, it will default to the current directory "
489-
"with the file name `benchmarks_reexported.json`."
494+
"The output formats to use for the benchmark results. "
495+
"Defaults to console and json, where the file formats "
496+
"will be saved at the specified output path."
490497
),
491498
)
492-
def from_file(path, output_path):
499+
def from_file(path, output_path, output_formats):
493500
"""
494501
Load and optionally re-export a previously saved benchmark report.
495502
496503
Imports benchmark results from a saved file and provides optional conversion
497504
to different output formats. Supports JSON, YAML, and CSV export formats
498505
based on the output file extension.
499506
"""
500-
asyncio.run(reimport_benchmarks_report(path, output_path))
507+
asyncio.run(reimport_benchmarks_report(path, output_path, output_formats))
501508

502509

503510
@cli.command(

src/guidellm/benchmark/entrypoints.py

Lines changed: 83 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
NonDistributedEnvironment,
4242
StrategyType,
4343
)
44-
from guidellm.utils import Console, InfoMixin, ConsoleUpdateStep
44+
from guidellm.utils import Console, InfoMixin
4545

4646
__all__ = [
4747
"benchmark_generative_text",
@@ -63,8 +63,6 @@
6363
| Path
6464
)
6565

66-
ProcessorType = str | Path | PreTrainedTokenizerBase
67-
6866
OutputFormatType = (
6967
tuple[str, ...]
7068
| list[str]
@@ -73,15 +71,11 @@
7371
)
7472

7573

76-
# TODO: Determine final location of the helper functions.
77-
78-
7974
async def initialize_backend(
8075
backend: BackendType | Backend,
8176
target: str,
8277
model: str | None,
8378
backend_kwargs: dict[str, Any] | None,
84-
console_step: ConsoleUpdateStep,
8579
) -> Backend:
8680
backend = (
8781
Backend.create(
@@ -90,121 +84,46 @@ async def initialize_backend(
9084
if not isinstance(backend, Backend)
9185
else backend
9286
)
93-
console_step.update(f"{backend.__class__.__name__} backend initialized")
9487
await backend.process_startup()
9588
await backend.validate()
96-
console_step.finish(
97-
title=f"{backend.__class__.__name__} backend initialized",
98-
details=backend.info,
99-
status_level="success",
100-
)
10189
return backend
10290

10391

104-
async def resolve_processor(
105-
processor: ProcessorType | None,
106-
model: str | None,
107-
backend: BackendType | Backend,
108-
console_step: ConsoleUpdateStep,
109-
) -> ProcessorType:
110-
if processor is not None:
111-
console_step.finish(
112-
title="Processor resolved",
113-
details=f"Using processor '{processor}'",
114-
status_level="success",
115-
)
116-
elif model is not None:
117-
console_step.finish(
118-
title="Processor resolved",
119-
details=f"Using model '{model}' as processor",
120-
status_level="success",
121-
)
122-
processor = model
123-
else:
124-
console_step.update(
125-
title="Resolving processor from backend.default_model",
126-
status_level="info",
127-
)
128-
processor = await backend.default_model()
129-
console_step.finish(
130-
title="Processor resolved",
131-
details=(
132-
f"Using model '{processor}' from backend "
133-
f"{backend.__class__.__name__} as processor"
134-
),
135-
status_level="success",
136-
)
137-
await backend.process_shutdown()
138-
return processor
139-
140-
141-
async def init_request_loader(
142-
data: DataType,
143-
data_args: dict[str, Any] | None,
144-
processor: ProcessorType,
145-
console_step: ConsoleUpdateStep,
146-
) -> GenerativeRequestLoader:
147-
request_loader = GenerativeRequestLoader(
148-
data=data,
149-
data_args=data_args,
150-
processor=processor,
151-
processor_args=processor_args,
152-
shuffle=data_sampler == "random",
153-
random_seed=random_seed,
154-
)
155-
unique_requests = request_loader.num_unique_items(raise_err=False)
156-
console_step.finish(
157-
title=(
158-
f"Request loader initialized with {unique_requests} unique requests "
159-
f"from {data}"
160-
),
161-
details=InfoMixin.extract_from_obj(request_loader),
162-
status_level="success",
163-
)
164-
return request_loader
165-
16692
async def resolve_profile(
16793
constraint_inputs: dict[str, int | float],
168-
profile: Profile | None,
94+
profile: Profile | str | None,
16995
rate: list[float] | None,
17096
random_seed: int,
17197
constraints: dict[str, ConstraintInitializer | Any],
172-
console_step: ConsoleUpdateStep,
17398
):
17499
for key, val in constraint_inputs.items():
175100
if val is not None:
176101
constraints[key] = val
177102
if not isinstance(profile, Profile):
178-
profile = Profile.create(
179-
rate_type=profile,
180-
rate=rate,
181-
random_seed=random_seed,
182-
constraints={**constraints},
183-
)
103+
if isinstance(profile, str):
104+
profile = Profile.create(
105+
rate_type=profile,
106+
rate=rate,
107+
random_seed=random_seed,
108+
constraints={**constraints},
109+
)
110+
else:
111+
raise ValueError(f"Expected string for profile; got {type(profile)}")
112+
184113
elif constraints:
185114
raise ValueError(
186115
"Constraints must be empty when providing a Profile instance. "
187116
f"Provided constraints: {constraints} ; provided profile: {profile}"
188117
)
189-
console_step.finish(
190-
title=f"{profile.__class__.__name__} profile resolved",
191-
details=InfoMixin.extract_from_obj(profile),
192-
status_level="success",
193-
)
118+
return profile
194119

195120
async def resolve_output_formats(
196121
output_formats: OutputFormatType,
197122
output_path: str | Path | None,
198-
console_step: ConsoleUpdateStep,
199123
) -> dict[str, GenerativeBenchmarkerOutput]:
200124
output_formats = GenerativeBenchmarkerOutput.resolve(
201125
output_formats=(output_formats or {}), output_path=output_path
202126
)
203-
console_step.finish(
204-
title="Output formats resolved",
205-
details={key: str(val) for key, val in output_formats.items()},
206-
status_level="success",
207-
)
208127
return output_formats
209128

210129
async def finalize_outputs(
@@ -217,8 +136,6 @@ async def finalize_outputs(
217136
output_format_results[key] = output_result
218137
return output_format_results
219138

220-
# End of helper functions.
221-
222139

223140
async def benchmark_with_scenario(scenario: Scenario, **kwargs):
224141
"""
@@ -273,20 +190,68 @@ async def benchmark_generative_text( # noqa: C901
273190
with console.print_update_step(
274191
title=f"Initializing backend {backend}"
275192
) as console_step:
276-
backend = await initialize_backend(backend)
193+
backend = await initialize_backend(backend, target, model, backend_kwargs)
194+
console_step.finish(
195+
title=f"{backend.__class__.__name__} backend initialized",
196+
details=backend.info,
197+
status_level="success",
198+
)
277199

278200
with console.print_update_step(title="Resolving processor") as console_step:
279-
await resolve_processor(processor, model, backend, console_step)
201+
if processor is not None:
202+
console_step.finish(
203+
title="Processor resolved",
204+
details=f"Using processor '{processor}'",
205+
status_level="success",
206+
)
207+
elif model is not None:
208+
console_step.finish(
209+
title="Processor resolved",
210+
details=f"Using model '{model}' as processor",
211+
status_level="success",
212+
)
213+
processor = model
214+
else:
215+
console_step.update(
216+
title="Resolving processor from backend.default_model",
217+
status_level="info",
218+
)
219+
processor = await backend.default_model()
220+
console_step.finish(
221+
title="Processor resolved",
222+
details=(
223+
f"Using model '{processor}' from backend "
224+
f"{backend.__class__.__name__} as processor"
225+
),
226+
status_level="success",
227+
)
228+
await backend.process_shutdown()
280229

281230
with console.print_update_step(
282231
title=f"Initializing request loader from {data}"
283232
) as console_step:
284-
request_loader = init_request_loader(data, data_args, processor, console_step)
233+
request_loader = GenerativeRequestLoader(
234+
data=data,
235+
data_args=data_args,
236+
processor=processor,
237+
processor_args=processor_args,
238+
shuffle=data_sampler == "random",
239+
random_seed=random_seed,
240+
)
241+
unique_requests = request_loader.num_unique_items(raise_err=False)
242+
console_step.finish(
243+
title=(
244+
f"Request loader initialized with {unique_requests} unique requests "
245+
f"from {data}"
246+
),
247+
details=InfoMixin.extract_from_obj(request_loader),
248+
status_level="success",
249+
)
285250

286251
with console.print_update_step(
287252
title=f"Resolving profile {profile}"
288253
) as console_step:
289-
resolve_profile(
254+
profile = await resolve_profile(
290255
{
291256
"max_seconds": max_seconds,
292257
"max_requests": max_requests,
@@ -298,7 +263,11 @@ async def benchmark_generative_text( # noqa: C901
298263
rate,
299264
random_seed,
300265
constraints,
301-
console_step,
266+
)
267+
console_step.finish(
268+
title=f"{profile.__class__.__name__} profile resolved",
269+
details=InfoMixin.extract_from_obj(profile),
270+
status_level="success",
302271
)
303272

304273
with console.print_update_step(
@@ -321,7 +290,12 @@ async def benchmark_generative_text( # noqa: C901
321290
)
322291

323292
with console.print_update_step(title="Resolving output formats") as console_step:
324-
resolved_output_formats = resolve_output_formats(output_formats, output_path, console_step)
293+
resolved_output_formats = await resolve_output_formats(output_formats, output_path)
294+
console_step.finish(
295+
title="Output formats resolved",
296+
details={key: str(val) for key, val in resolved_output_formats.items()},
297+
status_level="success",
298+
)
325299

326300
progress_group = BenchmarkerProgressGroup(
327301
instances=progress or [], enabled=bool(progress)
@@ -355,7 +329,7 @@ async def benchmark_generative_text( # noqa: C901
355329
if benchmark:
356330
report.benchmarks.append(benchmark)
357331

358-
output_format_results = finalize_outputs(report, resolved_output_formats)
332+
output_format_results = await finalize_outputs(report, resolved_output_formats)
359333

360334
console.print("\n\n")
361335
console.print_update(
@@ -386,7 +360,12 @@ async def reimport_benchmarks_report(
386360
console_step.finish(f"Import of old benchmarks complete; loaded {len(report.benchmarks)} benchmark(s)")
387361

388362
with console.print_update_step(title="Resolving output formats") as console_step:
389-
resolved_output_formats = await resolve_output_formats(output_formats, output_path, console_step)
363+
resolved_output_formats = await resolve_output_formats(output_formats, output_path)
364+
console_step.finish(
365+
title="Output formats resolved",
366+
details={key: str(val) for key, val in resolved_output_formats.items()},
367+
status_level="success",
368+
)
390369

391370
output_format_results = await finalize_outputs(report, resolved_output_formats)
392371

0 commit comments

Comments
 (0)