
Commit 331978b

Fix/ignore linting errors due to line length changes
1 parent 2bedc6d commit 331978b

File tree (2 files changed: +22 / -17 lines)

  src/guidellm/benchmark/benchmark.py
  src/guidellm/benchmark/output.py

src/guidellm/benchmark/benchmark.py

Lines changed: 10 additions & 5 deletions

@@ -847,27 +847,31 @@ def from_stats(
             time_to_first_token_ms=StatusDistributionSummary.from_values(
                 value_types=list(total_types_with_output_first),
                 values=[
-                    req.time_to_first_token_ms or 0 for req in total_with_output_first
+                    req.time_to_first_token_ms or 0
+                    for req in total_with_output_first
                 ],
             ),
             time_per_output_token_ms=StatusDistributionSummary.from_values(
                 value_types=list(total_types_with_output_first),
                 values=[
-                    req.time_per_output_token_ms or 0 for req in total_with_output_first
+                    req.time_per_output_token_ms or 0
+                    for req in total_with_output_first
                 ],
                 weights=[req.output_tokens for req in total_with_output_first],
             ),
             inter_token_latency_ms=StatusDistributionSummary.from_values(
                 value_types=list(total_types_with_output_multi),
                 values=[
-                    req.inter_token_latency_ms or 0 for req in total_with_output_multi
+                    req.inter_token_latency_ms or 0
+                    for req in total_with_output_multi
                 ],
                 weights=[req.output_tokens - 1 for req in total_with_output_multi],
             ),
             output_tokens_per_second=StatusDistributionSummary.from_iterable_request_times(
                 request_types=list(total_types_with_output_first),
                 requests=[
-                    (req.start_time, req.end_time) for req in total_with_output_first
+                    (req.start_time, req.end_time)
+                    for req in total_with_output_first
                 ],
                 first_iter_times=[
                     req.first_token_time or req.start_time
@@ -878,7 +882,8 @@ def from_stats(
             tokens_per_second=StatusDistributionSummary.from_iterable_request_times(
                 request_types=list(total_types_with_output_first),
                 requests=[
-                    (req.start_time, req.end_time) for req in total_with_output_first
+                    (req.start_time, req.end_time)
+                    for req in total_with_output_first
                 ],
                 first_iter_times=[
                     req.first_token_time or req.start_time
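
Every change in this file is mechanical: a comprehension that overflowed the line limit is wrapped so that its expression and its for-clause sit on separate lines, which changes layout but not behavior. Below is a minimal sketch of the same transformation outside guidellm; the Request dataclass and the sample values are hypothetical, and only the wrapping pattern comes from the diff above.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Request:
        # Hypothetical stand-in for the request objects aggregated in benchmark.py.
        time_to_first_token_ms: Optional[float] = None


    requests = [Request(12.5), Request(None), Request(8.0)]

    # Before: a single long line that trips E501 once the maximum line length shrinks.
    values = [req.time_to_first_token_ms or 0 for req in requests]

    # After: the same comprehension, wrapped as in the diff above so the expression
    # and the for-clause each occupy their own line.
    values_wrapped = [
        req.time_to_first_token_ms or 0
        for req in requests
    ]

    assert values == values_wrapped == [12.5, 0, 8.0]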

src/guidellm/benchmark/output.py

Lines changed: 12 additions & 12 deletions

@@ -263,23 +263,23 @@ def print_benchmarks_info(self):
                     f"{benchmark.total_count.errored}"
                 ),
                 (
-                    f"{benchmark.metrics.prompt_token_count.successful.mean:>5.1f} / "
+                    f"{benchmark.metrics.prompt_token_count.successful.mean:>5.1f} / "  # noqa: E501
                     f"{benchmark.metrics.prompt_token_count.incomplete.mean:.1f} / "
                     f"{benchmark.metrics.prompt_token_count.errored.mean:.1f}"
                 ),
                 (
-                    f"{benchmark.metrics.output_token_count.successful.mean:>5.1f} / "
+                    f"{benchmark.metrics.output_token_count.successful.mean:>5.1f} / "  # noqa: E501
                     f"{benchmark.metrics.output_token_count.incomplete.mean:.1f} / "
                     f"{benchmark.metrics.output_token_count.errored.mean:.1f}"
                 ),
                 (
-                    f"{benchmark.metrics.prompt_token_count.successful.total_sum:>6.0f} / "
-                    f"{benchmark.metrics.prompt_token_count.incomplete.total_sum:.0f} / "
+                    f"{benchmark.metrics.prompt_token_count.successful.total_sum:>6.0f} / "  # noqa: E501
+                    f"{benchmark.metrics.prompt_token_count.incomplete.total_sum:.0f} / "  # noqa: E501
                     f"{benchmark.metrics.prompt_token_count.errored.total_sum:.0f}"
                 ),
                 (
-                    f"{benchmark.metrics.output_token_count.successful.total_sum:>6.0f} / "
-                    f"{benchmark.metrics.output_token_count.incomplete.total_sum:.0f} / "
+                    f"{benchmark.metrics.output_token_count.successful.total_sum:>6.0f} / "  # noqa: E501
+                    f"{benchmark.metrics.output_token_count.incomplete.total_sum:.0f} / "  # noqa: E501
                     f"{benchmark.metrics.output_token_count.errored.total_sum:.0f}"
                 ),
             ]
@@ -323,18 +323,18 @@ def print_benchmarks_stats(self):
                     f"{benchmark.metrics.request_latency.successful.percentiles.p99:.2f}"
                 ),
                 (
-                    f"{benchmark.metrics.time_to_first_token_ms.successful.mean:.1f} / "
-                    f"{benchmark.metrics.time_to_first_token_ms.successful.median:.1f} / "
+                    f"{benchmark.metrics.time_to_first_token_ms.successful.mean:.1f} / "  # noqa: E501
+                    f"{benchmark.metrics.time_to_first_token_ms.successful.median:.1f} / "  # noqa: E501
                     f"{benchmark.metrics.time_to_first_token_ms.successful.percentiles.p99:.1f}"
                 ),
                 (
-                    f"{benchmark.metrics.inter_token_latency_ms.successful.mean:.1f} / "
-                    f"{benchmark.metrics.inter_token_latency_ms.successful.median:.1f} / "
+                    f"{benchmark.metrics.inter_token_latency_ms.successful.mean:.1f} / "  # noqa: E501
+                    f"{benchmark.metrics.inter_token_latency_ms.successful.median:.1f} / "  # noqa: E501
                     f"{benchmark.metrics.inter_token_latency_ms.successful.percentiles.p99:.1f}"
                 ),
                 (
-                    f"{benchmark.metrics.time_per_output_token_ms.successful.mean:.1f} / "
-                    f"{benchmark.metrics.time_per_output_token_ms.successful.median:.1f} / "
+                    f"{benchmark.metrics.time_per_output_token_ms.successful.mean:.1f} / "  # noqa: E501
+                    f"{benchmark.metrics.time_per_output_token_ms.successful.median:.1f} / "  # noqa: E501
                     f"{benchmark.metrics.time_per_output_token_ms.successful.percentiles.p99:.1f}"
                 ),
             ]
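
Here the long lines are implicitly concatenated f-strings inside table rows, so instead of wrapping them further the commit suppresses the check with "# noqa: E501", which tells the linter to skip only the line-too-long rule on that one physical line. Below is a minimal sketch of the same suppression pattern; the _Stats class and its values are hypothetical stand-ins for the benchmark.metrics objects in the real code.

    class _Stats:
        # Hypothetical stand-in for benchmark.metrics.<metric>.successful.
        mean = 123.4
        median = 120.0


    stats = _Stats()

    # Implicit concatenation of adjacent f-strings builds one table cell; the
    # per-line noqa silences E501 without disabling any other checks.
    row = (
        f"{stats.mean:>5.1f} / "  # noqa: E501
        f"{stats.median:.1f}"
    )

    print(row)  # -> "123.4 / 120.0"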
