Skip to content

Commit aede2e6

Browse files
authored
vLLM renamed AnyTokenizer to TokenizerLike. (#3005)
1 parent de45404 commit aede2e6

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

engines/python/setup/djl_python/lmi_vllm/request_response_utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
)
2424
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
2525
from vllm.logprobs import Logprob
26-
from vllm.transformers_utils.tokenizer import AnyTokenizer
26+
from vllm.tokenizers import TokenizerLike
2727

2828
from djl_python.outputs import Output
2929
from djl_python.async_utils import create_non_stream_output, create_stream_chunk_output
@@ -101,7 +101,7 @@ def convert_lmi_schema_to_completion_request(
101101

102102
def convert_completion_logprobs_to_tgi_tokens(
103103
completion_logprobs: CompletionLogProbs,
104-
tokenizer: AnyTokenizer,
104+
tokenizer: TokenizerLike,
105105
) -> List[dict]:
106106
token_logprobs = completion_logprobs.token_logprobs
107107
tokens = completion_logprobs.tokens
@@ -138,7 +138,7 @@ def convert_completion_response_to_lmi_schema(
138138
response: CompletionResponse,
139139
request: CompletionRequest = None,
140140
include_details: bool = False,
141-
tokenizer: AnyTokenizer = None,
141+
tokenizer: TokenizerLike = None,
142142
) -> Output:
143143
primary_choice = response.choices[0]
144144
lmi_response = {"generated_text": primary_choice.text}
@@ -258,7 +258,7 @@ def convert_completion_chunk_response_to_lmi_schema(
258258
def lmi_with_details_non_stream_output_formatter(
259259
response: CompletionResponse,
260260
request: CompletionRequest = None,
261-
tokenizer: AnyTokenizer = None,
261+
tokenizer: TokenizerLike = None,
262262
) -> Output:
263263
return convert_completion_response_to_lmi_schema(response,
264264
include_details=True,
@@ -269,7 +269,7 @@ def lmi_with_details_non_stream_output_formatter(
269269
def lmi_non_stream_output_formatter(
270270
response: CompletionResponse,
271271
request: CompletionRequest = None,
272-
tokenizer: AnyTokenizer = None,
272+
tokenizer: TokenizerLike = None,
273273
) -> Output:
274274
return convert_completion_response_to_lmi_schema(response,
275275
include_details=False,

0 commit comments

Comments
 (0)