LLM.set_tokenizer
1 parent 448b95f · commit a073eaf
vllm/entrypoints/llm.py
@@ -9,7 +9,7 @@
 import torch.nn as nn
 from pydantic import ValidationError
 from tqdm.auto import tqdm
-from typing_extensions import TypeVar
+from typing_extensions import TypeVar, deprecated
 
 from vllm.beam_search import (
     BeamSearchInstance,
@@ -354,6 +354,7 @@ def model_config(self):
     def get_tokenizer(self) -> AnyTokenizer:
         return self.llm_engine.get_tokenizer()
 
+    @deprecated("`set_tokenizer` is deprecated and will be removed in v0.13.")
     def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
         # While CachedTokenizer is dynamic, have no choice but
         # compare class name. Misjudgment will arise from
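For context, a minimal sketch (not part of the vLLM codebase; the class, method names, and message below are illustrative) of how `typing_extensions.deprecated` behaves when applied to a method, assuming typing_extensions >= 4.9 where the decorator also emits a runtime DeprecationWarning in addition to flagging calls for static type checkers:

import warnings

from typing_extensions import deprecated


class Example:
    """Toy class to demonstrate the @deprecated decorator."""

    @deprecated("`old_method` is deprecated; use `new_method` instead.")
    def old_method(self) -> str:
        # Delegate to the replacement API.
        return self.new_method()

    def new_method(self) -> str:
        return "ok"


if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        result = Example().old_method()
    assert result == "ok"
    # Calling a @deprecated method emits a DeprecationWarning at runtime
    # (on recent typing_extensions versions), on top of the static-analysis
    # diagnostic reported by type checkers such as mypy and pyright.
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)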