
Commit 3d1cfbf

[Minor] Delete Llama tokenizer warnings (#2146)
1 parent 37ca558 commit 3d1cfbf

File tree

1 file changed: 0 additions, 17 deletions


vllm/transformers_utils/tokenizer.py

Lines changed: 0 additions & 17 deletions
@@ -8,9 +8,6 @@
 
 logger = init_logger(__name__)
 
-# A fast LLaMA tokenizer with the pre-processed `tokenizer.json` file.
-_FAST_LLAMA_TOKENIZER = "hf-internal-testing/llama-tokenizer"
-
 
 def get_tokenizer(
     tokenizer_name: str,
@@ -27,27 +24,13 @@ def get_tokenizer(
                 "Cannot use the fast tokenizer in slow tokenizer mode.")
         kwargs["use_fast"] = False
 
-    if ("llama" in tokenizer_name.lower() and kwargs.get("use_fast", True)
-            and tokenizer_name != _FAST_LLAMA_TOKENIZER):
-        logger.info(
-            "For some LLaMA V1 models, initializing the fast tokenizer may "
-            "take a long time. To reduce the initialization time, consider "
-            f"using '{_FAST_LLAMA_TOKENIZER}' instead of the original "
-            "tokenizer.")
     try:
         tokenizer = AutoTokenizer.from_pretrained(
             tokenizer_name,
             *args,
             trust_remote_code=trust_remote_code,
             tokenizer_revision=tokenizer_revision,
             **kwargs)
-    except TypeError as e:
-        # The LLaMA tokenizer causes a protobuf error in some environments.
-        err_msg = (
-            "Failed to load the tokenizer. If you are using a LLaMA V1 model "
-            f"consider using '{_FAST_LLAMA_TOKENIZER}' instead of the "
-            "original tokenizer.")
-        raise RuntimeError(err_msg) from e
     except ValueError as e:
         # If the error pertains to the tokenizer class not existing or not
         # currently being imported, suggest using the --trust-remote-code flag.
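
For context, a minimal usage sketch of the code path that remains after this change: get_tokenizer now defers directly to AutoTokenizer.from_pretrained for LLaMA models, with no fast-tokenizer hint and no TypeError fallback. The model name and keyword arguments shown below are illustrative assumptions, not taken from the commit.

# Usage sketch (illustrative, not part of the diff).
from vllm.transformers_utils.tokenizer import get_tokenizer

tokenizer = get_tokenizer(
    "meta-llama/Llama-2-7b-hf",   # hypothetical model/tokenizer name
    trust_remote_code=False,      # forwarded to AutoTokenizer.from_pretrained
    tokenizer_revision=None,      # forwarded to AutoTokenizer.from_pretrained
)
print(type(tokenizer))  # typically a fast (Rust-backed) tokenizer class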
