Commit fff5c87
Allow reexporting reimported benchmarks
1 parent 7f2dd40 · commit fff5c87

6 files changed: +50 / -13 lines

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -56,6 +56,7 @@ dependencies = [
     "pyyaml>=6.0.0",
     "rich",
     "transformers",
+    "click-default-group~=1.2.4"
 ]

 [project.optional-dependencies]
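Note: the ~=1.2.4 compatible-release specifier (PEP 440) is equivalent to >=1.2.4,<1.3.0, so patch releases of click-default-group are picked up automatically while a potentially breaking 1.3 release is excluded.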

src/guidellm/__main__.py

Lines changed: 36 additions & 6 deletions
@@ -3,14 +3,15 @@
 import json
 from pathlib import Path
 from typing import get_args
+from click_default_group import DefaultGroup

 import click

 from guidellm.backend import BackendType
 from guidellm.benchmark import (
     ProfileType,
     benchmark_generative_text,
-    display_benchmarks_report,
+    reimport_benchmarks_report,
 )
 from guidellm.config import print_config
 from guidellm.preprocess.dataset import ShortPromptStrategy, process_dataset
@@ -48,8 +49,18 @@ def parse_number_str(ctx, param, value):  # noqa: ARG001
 def cli():
     pass

+@cli.group(
+    help="Commands to run a new benchmark or load a prior one.",
+    cls=DefaultGroup,
+    default="run",
+    default_if_no_args=True,
+)
+def benchmark():
+    pass

-@cli.command(
+
+@benchmark.command(
+    "run",
     help="Run a benchmark against a generative model using the specified arguments."
 )
 @click.option(
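The DefaultGroup wiring above is what keeps the old flat command working: with default="run" and default_if_no_args=True, both bare `guidellm benchmark` and `guidellm benchmark <options>` dispatch to the new `run` subcommand. A minimal standalone sketch of the pattern (a toy CLI, not guidellm itself):

import click
from click_default_group import DefaultGroup


@click.group()
def cli():
    pass


# DefaultGroup routes absent or unrecognized subcommands to "run", so
# "toy benchmark --rate 5" and "toy benchmark run --rate 5" behave the same.
@cli.group(cls=DefaultGroup, default="run", default_if_no_args=True)
def benchmark():
    pass


@benchmark.command("run")
@click.option("--rate", type=int, default=1)
def run(rate):
    click.echo(f"running at rate={rate}")


if __name__ == "__main__":
    cli()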
@@ -234,7 +245,7 @@ def cli():
     type=int,
     help="The random seed to use for benchmarking to ensure reproducibility.",
 )
-def benchmark(
+def run(
     target,
     backend_type,
     backend_args,
@@ -286,14 +297,32 @@ def benchmark(
     )


-@cli.command(help="Redisplay a saved benchmark report.")
+@benchmark.command(help="Load a saved benchmark report.")
 @click.argument(
     "path",
     type=click.Path(file_okay=True, dir_okay=False, exists=True),
     default=Path.cwd() / "benchmarks.json",
 )
-def display(path):
-    display_benchmarks_report(path)
+@click.option(
+    "--output-path",
+    type=click.Path(file_okay=True, dir_okay=True, exists=False),
+    default=None,
+    is_flag=False,
+    flag_value=Path.cwd() / "benchmarks_reexported.json",
+    help=(
+        "Optionally re-export the benchmarks to another format. "
+        "If the given path is a directory, benchmarks.json is saved "
+        "under it; otherwise the output type (json, yaml, or csv) is "
+        "inferred from the file extension. If the flag is omitted, the "
+        "benchmarks are not re-exported; if it is given without a value, "
+        "it defaults to benchmarks_reexported.json in the current "
+        "directory."
+    ),
+)
+def from_file(path, output_path):
+    reimport_benchmarks_report(path, output_path)
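The --output-path option uses click's optional-value pattern: is_flag=False plus flag_value means the option can carry a value, appear bare (taking flag_value), or be omitted (taking default). A self-contained sketch exercised with click's test runner (a hypothetical toy command, not guidellm):

import click
from click.testing import CliRunner


@click.command()
@click.option(
    "--output-path",
    default=None,  # option omitted entirely -> None
    is_flag=False,
    flag_value="benchmarks_reexported.json",  # bare "--output-path" -> this value
)
def export(output_path):
    click.echo(f"output_path={output_path}")


runner = CliRunner()
print(runner.invoke(export, []).output)                            # output_path=None
print(runner.invoke(export, ["--output-path"]).output)             # the flag_value
print(runner.invoke(export, ["--output-path", "out.csv"]).output)  # out.csv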
@@ -311,6 +340,7 @@ def decode_escaped_str(_ctx, _param, value):


 @cli.command(
+    short_help="Prints environment variable settings.",
     help=(
         "Print out the available configuration settings that can be set "
         "through environment variables."

src/guidellm/benchmark/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
     StatusBreakdown,
 )
 from .benchmarker import Benchmarker, BenchmarkerResult, GenerativeBenchmarker
-from .entrypoints import benchmark_generative_text, display_benchmarks_report
+from .entrypoints import benchmark_generative_text, reimport_benchmarks_report
 from .output import GenerativeBenchmarksConsole, GenerativeBenchmarksReport
 from .profile import (
     AsyncProfile,
@@ -63,5 +63,5 @@
     "ThroughputProfile",
     "benchmark_generative_text",
     "create_profile",
-    "display_benchmarks_report",
+    "reimport_benchmarks_report",
 ]

src/guidellm/benchmark/entrypoints.py

Lines changed: 8 additions & 2 deletions
@@ -135,12 +135,18 @@ async def benchmark_generative_text(

     return report, saved_path

-def display_benchmarks_report(file: Path):
+def reimport_benchmarks_report(file: Path, output_path: Optional[Path] = None) -> None:
     """
-    The command-line entry point for displaying a benchmarks report.
+    The command-line entry point for re-importing and displaying an
+    existing benchmarks report. Can also re-export it to another
+    format via output_path.
     Assumes the file provided exists.
     """
     console = GenerativeBenchmarksConsole(enabled=True)
     report = GenerativeBenchmarksReport.load_file(file)
     console.benchmarks = report.benchmarks
     console.print_full_report()
+
+    if output_path:
+        console.print_line("\nSaving benchmarks report...")
+        saved_path = report.save_file(output_path)
+        console.print_line(f"Benchmarks report saved to {saved_path}")

src/guidellm/benchmark/output.py

Lines changed: 1 addition & 1 deletion
@@ -242,7 +242,7 @@ def _file_setup(
         if path_suffix in [".csv"]:
             return path, "csv"

-        raise ValueError(f"Unsupported file extension: {path_suffix} for {path}.")
+        raise ValueError(f"Unsupported file extension: {path_suffix} for {path}; expected json, yaml, or csv.")

     @staticmethod
     def _benchmark_desc_headers_and_values(

tests/unit/entrypoints/test_display_entrypoint.py renamed to tests/unit/entrypoints/test_benchmark_from_file_entrypoint.py

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@

 import pytest

-from guidellm.benchmark import display_benchmarks_report
+from guidellm.benchmark import reimport_benchmarks_report

 # Set to true to re-write the expected output.
 REGENERATE_ARTIFACTS = False
@@ -37,7 +37,7 @@ def test_display_entrypoint_yaml(capfd, get_test_asset_dir):
 def generic_test_display_entrypoint(filename, capfd, get_test_asset_dir):
     os.environ["COLUMNS"] = "180"  # CLI output depends on terminal width.
     asset_dir = get_test_asset_dir()
-    display_benchmarks_report(asset_dir / filename)
+    reimport_benchmarks_report(asset_dir / filename)
     out, err = capfd.readouterr()
     expected_output_path = asset_dir / "benchmarks_stripped_output.txt"
     if REGENERATE_ARTIFACTS:
