Skip to content

Commit a88605e

Browse files
committed
Fixes from Copilot review, and standardize the backend package name to the plural "backends"
Signed-off-by: Mark Kurtz <[email protected]>
1 parent 02554b0 commit a88605e

File tree

16 files changed

+34
-26
lines changed

16 files changed

+34
-26
lines changed

src/guidellm/__main__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import click
77
from pydantic import ValidationError
88

9-
from guidellm.backend import BackendType
9+
from guidellm.backends import BackendType
1010
from guidellm.benchmark import (
1111
ProfileType,
1212
reimport_benchmarks_report,
File renamed without changes.

src/guidellm/backend/backend.py renamed to src/guidellm/backends/backend.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
from abc import abstractmethod
1717
from typing import Literal
1818

19-
from guidellm.backend.objects import (
19+
from guidellm.backends.objects import (
2020
GenerationRequest,
2121
GenerationResponse,
2222
)
File renamed without changes.

src/guidellm/backend/openai.py renamed to src/guidellm/backends/openai.py

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@
2323
from PIL import Image
2424
from pydantic import dataclasses
2525

26-
from guidellm.backend.backend import Backend
27-
from guidellm.backend.objects import (
26+
from guidellm.backends.backend import Backend
27+
from guidellm.backends.objects import (
2828
GenerationRequest,
2929
GenerationRequestTimings,
3030
GenerationResponse,
@@ -351,8 +351,8 @@ async def resolve(
351351

352352
if usage_stats is not None:
353353
request_info.request_timings.request_end = time.time()
354-
response.request_output_tokens = usage_stats.output_tokens
355-
response.request_prompt_tokens = usage_stats.prompt_tokens
354+
response.response_output_tokens = usage_stats.output_tokens
355+
response.response_prompt_tokens = usage_stats.prompt_tokens
356356

357357
yield response, request_info
358358

@@ -602,7 +602,7 @@ def _get_body(
602602
**kwargs,
603603
) -> dict[str, Any]:
604604
# Start with endpoint-specific extra body parameters
605-
extra_body = self.extra_body.get(endpoint_type, self.extra_body)
605+
extra_body: dict = self.extra_body.get(endpoint_type, self.extra_body)
606606

607607
body = copy.deepcopy(extra_body)
608608
body.update(request_kwargs or {})
@@ -622,14 +622,22 @@ def _get_body(
622622
if max_output_tokens:
623623
body.update({"stop": None, "ignore_eos": True})
624624

625+
if self.remove_from_body:
626+
for key in self.remove_from_body:
627+
body.pop(key, None)
628+
625629
return {key: val for key, val in body.items() if val is not None}
626630

627631
def _get_completions_text_content(self, data: dict) -> Optional[str]:
628632
if not data.get("choices"):
629633
return None
630634

631-
choice = data["choices"][0]
632-
return choice.get("text") or choice.get("delta", {}).get("content")
635+
choice: dict = data["choices"][0]
636+
return (
637+
choice.get("text")
638+
or choice.get("delta", {}).get("content")
639+
or choice.get("message", {}).get("content")
640+
)
633641

634642
def _get_completions_usage_stats(self, data: dict) -> Optional[UsageStats]:
635643
if not data.get("usage"):

src/guidellm/benchmark/aggregator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
from pydantic import Field
1414

15-
from guidellm.backend import ResponseSummary
15+
from guidellm.backends import ResponseSummary
1616
from guidellm.benchmark.benchmark import (
1717
BenchmarkArgs,
1818
BenchmarkRunStats,

src/guidellm/benchmark/benchmarker.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
from pydantic import Field
1515
from transformers import PreTrainedTokenizerBase # type: ignore # noqa: PGH003
1616

17-
from guidellm.backend import Backend, ResponseSummary
17+
from guidellm.backends import Backend, ResponseSummary
1818
from guidellm.benchmark.aggregator import (
1919
AggregatorT,
2020
BenchmarkT,

src/guidellm/benchmark/entrypoints.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
PreTrainedTokenizerBase,
88
)
99

10-
from guidellm.backend import Backend, BackendType
10+
from guidellm.backends import Backend, BackendType
1111
from guidellm.benchmark.benchmarker import GenerativeBenchmarker
1212
from guidellm.benchmark.output import (
1313
GenerativeBenchmarksConsole,

src/guidellm/benchmark/scenario.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
PreTrainedTokenizerBase,
1010
)
1111

12-
from guidellm.backend.backend import BackendType
12+
from guidellm.backends.backend import BackendType
1313
from guidellm.benchmark.profile import ProfileType
1414
from guidellm.objects.pydantic import StandardBaseModel
1515
from guidellm.scheduler.strategies import StrategyType

tests/unit/backend/test_backend.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@
1212

1313
import pytest
1414

15-
from guidellm.backend.backend import Backend, BackendType
16-
from guidellm.backend.objects import (
15+
from guidellm.backends.backend import Backend, BackendType
16+
from guidellm.backends.objects import (
1717
GenerationRequest,
1818
GenerationRequestTimings,
1919
)
@@ -244,7 +244,7 @@ class TestBackendRegistry:
244244
@pytest.mark.smoke
245245
def test_openai_backend_registered(self):
246246
"""Test that OpenAI HTTP backend is registered."""
247-
from guidellm.backend.openai import OpenAIHTTPBackend
247+
from guidellm.backends.openai import OpenAIHTTPBackend
248248

249249
# OpenAI backend should be registered
250250
backend = Backend.create("openai_http", target="http://test")
@@ -262,7 +262,7 @@ def test_backend_create_invalid_type(self):
262262
@pytest.mark.smoke
263263
def test_backend_registry_functionality(self):
264264
"""Test that backend registry functions work."""
265-
from guidellm.backend.openai import OpenAIHTTPBackend
265+
from guidellm.backends.openai import OpenAIHTTPBackend
266266

267267
# Test that we can get registered backends
268268
openai_class = Backend.get_registered_object("openai_http")
@@ -327,6 +327,6 @@ def test_backend_registered_objects(self):
327327
assert len(registered) > 0
328328

329329
# Check that openai backend is in the registered objects
330-
from guidellm.backend.openai import OpenAIHTTPBackend
330+
from guidellm.backends.openai import OpenAIHTTPBackend
331331

332332
assert OpenAIHTTPBackend in registered

0 commit comments

Comments
 (0)