Commit f41fea6 (parent 904a4dd)

fix all quality/typing issues

File tree: 9 files changed (+57, -30 lines)

README.md

Lines changed: 1 addition & 0 deletions

@@ -160,6 +160,7 @@ GuideLLM UI is a companion frontend for visualizing the results of a GuideLLM be
 ### 🛠 Generating an HTML report with a benchmark run
 
 Set the output to benchmarks.html for your run:
+
 ```base
 --output-path=benchmarks.html
 ```

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -53,10 +53,10 @@ dependencies = [
     "protobuf",
     "pydantic>=2.0.0",
     "pydantic-settings>=2.0.0",
+    "pyhumps>=3.8.0",
     "pyyaml>=6.0.0",
     "rich",
     "transformers",
-    "pyhumps>=3.8.0",
 ]
 
 [project.optional-dependencies]

src/guidellm/benchmark/output.py

Lines changed: 7 additions & 6 deletions

@@ -6,7 +6,7 @@
 from pathlib import Path
 from typing import Any, Literal, Optional, Union
 
-import humps
+import humps  # type: ignore[import-not-found]
 import yaml
 from pydantic import Field
 from rich.console import Console

@@ -236,14 +236,15 @@ def save_html(self, path: Union[str, Path]) -> Path:
         :param path: The path to create the report at.
         :return: The path to the report.
         """
-        from guidellm.presentation import UIDataBuilder
+
         data_builder = UIDataBuilder(self.benchmarks)
         data = data_builder.to_dict()
         camel_data = humps.camelize(data)
-        ui_api_data = {
-            f"window.{humps.decamelize(k)} = {{}};": f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n"
-            for k, v in camel_data.items()
-        }
+        ui_api_data = {}
+        for k, v in camel_data.items():
+            key = f"window.{humps.decamelize(k)} = {{}};"
+            value = f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n"
+            ui_api_data[key] = value
         return create_report(ui_api_data, path)
 
     @staticmethod
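For context, the rewritten loop in `save_html` builds the same placeholder-to-JavaScript mapping as the old dict comprehension, one entry per top-level key of the UI data. Below is a minimal standalone sketch of that transformation; the sample `data` dict is invented for illustration and is not what `UIDataBuilder.to_dict()` actually returns.

```python
import json

import humps  # pyhumps: camelCase/snake_case key conversion

# Invented sample standing in for UIDataBuilder(self.benchmarks).to_dict().
data = {
    "run_info": {"model_name": "example-model"},
    "workload_details": {"max_seconds": 30},
}

camel_data = humps.camelize(data)  # keys become runInfo, workloadDetails, ...

ui_api_data = {}
for k, v in camel_data.items():
    # The key matches the placeholder already present in the HTML template;
    # the value is the JS assignment that will replace it with real JSON.
    key = f"window.{humps.decamelize(k)} = {{}};"
    value = f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n"
    ui_api_data[key] = value

print(ui_api_data["window.run_info = {};"])
# window.run_info = {
#   "modelName": "example-model"
# };
```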

src/guidellm/presentation/builder.py

Lines changed: 2 additions & 1 deletion

@@ -1,10 +1,11 @@
-from typing import Any, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 
 if TYPE_CHECKING:
     from guidellm.benchmark.benchmark import GenerativeBenchmark
 
 from .data_models import BenchmarkDatum, RunInfo, WorkloadDetails
 
+
 class UIDataBuilder:
     def __init__(self, benchmarks: list["GenerativeBenchmark"]):
         self.benchmarks = benchmarks

src/guidellm/presentation/data_models.py

Lines changed: 12 additions & 8 deletions

@@ -1,25 +1,26 @@
 import random
 from collections import defaultdict
 from math import ceil
-from typing import List, Optional, Tuple, TYPE_CHECKING, Union
+from typing import TYPE_CHECKING, Optional, Union
 
-from pydantic import BaseModel, computed_field
+from pydantic import BaseModel
 
 if TYPE_CHECKING:
     from guidellm.benchmark.benchmark import GenerativeBenchmark
 
 from guidellm.objects.statistics import DistributionSummary
 
+
 class Bucket(BaseModel):
     value: Union[float, int]
     count: int
 
     @staticmethod
     def from_data(
-        data: Union[List[float], List[int]],
+        data: Union[list[float], list[int]],
         bucket_width: Optional[float] = None,
         n_buckets: Optional[int] = None,
-    ) -> Tuple[List["Bucket"], float]:
+    ) -> tuple[list["Bucket"], float]:
         if not data:
             return [], 1.0
 

@@ -125,10 +126,14 @@ def from_benchmarks(cls, benchmarks: list["GenerativeBenchmark"]):
         ]
 
         prompt_tokens = [
-            float(req.prompt_tokens) for bm in benchmarks for req in bm.requests.successful
+            float(req.prompt_tokens)
+            for bm in benchmarks
+            for req in bm.requests.successful
         ]
         output_tokens = [
-            float(req.output_tokens) for bm in benchmarks for req in bm.requests.successful
+            float(req.output_tokens)
+            for bm in benchmarks
+            for req in bm.requests.successful
         ]
 
         prompt_token_buckets, _prompt_token_bucket_width = Bucket.from_data(

@@ -190,10 +195,9 @@ def percentile_rows(self) -> list[dict[str, float]]:
             {"percentile": name, "value": value}
             for name, value in self.percentiles.model_dump().items()
         ]
-        filtered_rows = list(
+        return list(
             filter(lambda row: row["percentile"] in ["p50", "p90", "p95", "p99"], rows)
         )
-        return filtered_rows
 
     @classmethod
     def from_distribution_summary(
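The `percentile_rows` change above simply returns the filtered list directly instead of binding it to `filtered_rows` first. A small self-contained illustration of the same filter, with made-up percentile values standing in for `self.percentiles.model_dump()`:

```python
# Made-up percentile values standing in for self.percentiles.model_dump().
percentiles = {"p01": 0.8, "p50": 1.2, "p90": 2.4, "p95": 3.1, "p99": 5.0}

rows = [
    {"percentile": name, "value": value}
    for name, value in percentiles.items()
]

# Keep only the percentiles the UI renders.
filtered = list(
    filter(lambda row: row["percentile"] in ["p50", "p90", "p95", "p99"], rows)
)
print(filtered)
# [{'percentile': 'p50', 'value': 1.2}, {'percentile': 'p90', 'value': 2.4},
#  {'percentile': 'p95', 'value': 3.1}, {'percentile': 'p99', 'value': 5.0}]
```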

src/guidellm/presentation/injector.py

Lines changed: 3 additions & 4 deletions

@@ -37,7 +37,8 @@ def inject_data(
     html: str,
 ) -> str:
     """
-    Injects the json data into the HTML, replacing placeholders only within the <head> section.
+    Injects the json data into the HTML,
+    replacing placeholders only within the <head> section.
 
     :param js_data: the json data to inject
     :type js_data: dict

@@ -58,6 +59,4 @@ def inject_data(
 
     # Rebuild the HTML
     new_head = f"<head>{head_content}</head>"
-    html = html[: head_match.start()] + new_head + html[head_match.end() :]
-
-    return html
+    return html[: head_match.start()] + new_head + html[head_match.end() :]
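Only the tail of `inject_data` is visible in the hunks above. As a rough sketch of the overall approach it implies (the regex and the placeholder-replacement loop are assumptions, not code taken from the repository):

```python
import re


def inject_data_sketch(js_data: dict[str, str], html: str) -> str:
    """Replace placeholder scripts with real data, but only inside <head>."""
    head_match = re.search(r"<head>(.*?)</head>", html, flags=re.DOTALL)
    if not head_match:
        return html

    # Apply every placeholder -> replacement pair within the head content only.
    head_content = head_match.group(1)
    for placeholder, replacement in js_data.items():
        head_content = head_content.replace(placeholder, replacement)

    # Rebuild the HTML
    new_head = f"<head>{head_content}</head>"
    return html[: head_match.start()] + new_head + html[head_match.end() :]
```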

tests/unit/presentation/__init__.py

Whitespace-only changes.

tests/unit/presentation/test_injector.py

Lines changed: 30 additions & 6 deletions

@@ -15,8 +15,16 @@ class ExampleModel(BaseModel):
 @pytest.mark.smoke
 def test_inject_data():
     html = "<head><script>window.run_info = {};</script></head>"
-    expected_html = '<head><script>window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };</script></head>'
-    js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" }
+    expected_html = (
+        "<head><script>"
+        "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+        "</script></head>"
+    )
+    js_data = {
+        "window.run_info = {};": "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+    }
     result = inject_data(
         js_data,
         html,

@@ -26,9 +34,17 @@ def test_inject_data():
 
 @pytest.mark.smoke
 def test_create_report_to_file(tmpdir):
-    js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" }
+    js_data = {
+        "window.run_info = {};": "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+    }
     html_content = "<head><script>window.run_info = {};</script></head>"
-    expected_html_content = '<head><script>window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };</script></head>'
+    expected_html_content = (
+        "<head><script>"
+        "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+        "</script></head>"
+    )
 
     mock_html_path = tmpdir.join("template.html")
     mock_html_path.write(html_content)

@@ -44,9 +60,17 @@ def test_create_report_to_file(tmpdir):
 
 @pytest.mark.smoke
 def test_create_report_with_file_nested_in_dir(tmpdir):
-    js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" }
+    js_data = {
+        "window.run_info = {};": "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+    }
     html_content = "<head><script>window.run_info = {};</script></head>"
-    expected_html_content = '<head><script>window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };</script></head>'
+    expected_html_content = (
+        "<head><script>"
+        "window.run_info ="
+        '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };'
+        "</script></head>"
+    )
 
     output_dir = tmpdir.mkdir("output_dir")
     mock_html_path = tmpdir.join("template.html")

tests/unit/test_config.py

Lines changed: 1 addition & 4 deletions

@@ -49,10 +49,7 @@ def test_settings_from_env_variables(mocker):
 @pytest.mark.smoke
 def test_report_generation_default_source():
     settings = Settings(env=Environment.LOCAL)
-    assert (
-        settings.report_generation.source
-        == "http://localhost:3000/index.html"
-    )
+    assert settings.report_generation.source == "http://localhost:3000/index.html"
 
     settings = Settings(env=Environment.DEV)
     assert (
