Skip to content

Commit 569aefd

Browse files
chore: remove unnecessary patch_padding_side for the chatglm model (vllm-project#23090)
Signed-off-by: carlory <[email protected]>
Signed-off-by: DarkLight1337 <[email protected]>
Co-authored-by: Cyrus Leung <[email protected]>
1 parent d3f71f1 commit 569aefd

File tree

2 files changed

+0
-29
lines changed

2 files changed

+0
-29
lines changed

tests/models/multimodal/generation/vlm_utils/model_utils.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
from transformers.video_utils import VideoMetadata
2020

2121
from vllm.sequence import SampleLogprobs
22-
from vllm.transformers_utils.tokenizer import patch_padding_side
2322
from vllm.utils import is_list_of
2423

2524
from .....conftest import HfRunner, ImageAsset, ImageTestAssets
@@ -343,7 +342,6 @@ def _generate(self, *args, **kwargs):
343342
def glm4v_patch_hf_runner(hf_model: HfRunner) -> HfRunner:
344343
"""Patches and returns an instance of the HfRunner to use for GLM4V."""
345344
hf_processor = hf_model.processor
346-
patch_padding_side(hf_processor)
347345

348346
def processor(*args, text="", images=None, **kwargs):
349347
if images is None:

vllm/transformers_utils/tokenizer.py

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
import warnings
88
from functools import lru_cache
99
from pathlib import Path
10-
from types import MethodType
1110
from typing import TYPE_CHECKING, Any, Optional, Union
1211

1312
import huggingface_hub
@@ -144,26 +143,6 @@ def __reduce__(self):
144143
return cached_tokenizer
145144

146145

147-
def patch_padding_side(tokenizer: PreTrainedTokenizer) -> None:
148-
"""Patch _pad method to accept `padding_side` for older tokenizers."""
149-
orig_pad = tokenizer._pad
150-
151-
def _pad(
152-
self: PreTrainedTokenizer,
153-
*args,
154-
padding_side: Optional[str] = None,
155-
**kwargs,
156-
):
157-
if padding_side is not None and padding_side != self.padding_side:
158-
msg = ("`padding_side` argument is not supported by "
159-
f"{type(tokenizer).__name__} and will be ignored.")
160-
warnings.warn(msg, stacklevel=2)
161-
162-
return orig_pad(*args, **kwargs)
163-
164-
tokenizer._pad = MethodType(_pad, tokenizer)
165-
166-
167146
def get_tokenizer(
168147
tokenizer_name: Union[str, Path],
169148
*args,
@@ -271,12 +250,6 @@ def get_tokenizer(
271250
}
272251
tokenizer.add_special_tokens(special_tokens_map)
273252

274-
# NOTE: We can remove this after https://github.com/zai-org/ChatGLM3/issues/1324
275-
if type(tokenizer).__name__ in ("ChatGLMTokenizer",
276-
"ChatGLM4Tokenizer"):
277-
assert isinstance(tokenizer, PreTrainedTokenizer)
278-
patch_padding_side(tokenizer)
279-
280253
if not isinstance(tokenizer, PreTrainedTokenizerFast):
281254
logger.warning(
282255
"Using a slow tokenizer. This might cause a significant "

0 commit comments

Comments (0)