Skip to content

Commit 28a1208

Browse files
committed
If prompt/output token count is 0, don't set stats/constraints
1 parent 223bd60 commit 28a1208

File tree

1 file changed

+6
-8
lines changed

1 file changed

+6
-8
lines changed

src/guidellm/request/session.py

Lines changed: 6 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,9 @@
11
import itertools
22
from abc import ABC, abstractmethod
3-
from collections.abc import Sequence
4-
from typing import Generic
3+
from typing import TYPE_CHECKING, Generic
4+
5+
if TYPE_CHECKING:
6+
from collections.abc import Sequence
57

68
from guidellm.backend.response import ResponseSummary
79
from guidellm.config import settings
@@ -64,12 +66,8 @@ def get_next_request(self) -> GenerationRequest:
6466
return GenerationRequest(
6567
request_type=settings.preferred_route,
6668
content=content,
67-
stats=(
68-
{"prompt_tokens": prompt_tokens} if prompt_tokens is not None else {}
69-
),
70-
constraints=(
71-
{"output_tokens": output_tokens} if output_tokens is not None else {}
72-
),
69+
stats=({"prompt_tokens": prompt_tokens} if prompt_tokens else {}),
70+
constraints=({"output_tokens": output_tokens} if output_tokens else {}),
7371
)
7472

7573
def get_next_delay(self) -> float:

0 commit comments

Comments (0)