
Commit a388252

hmellor and Isotr0py authored

Add explicit pooling classes for the Transformers backend (#25322)

Signed-off-by: Harry Mellor <[email protected]>
Signed-off-by: Isotr0py <[email protected]>
Co-authored-by: Isotr0py <[email protected]>

1 parent 9a9f48d · commit a388252
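
From the user's side nothing changes in how models are loaded; the backend class is now resolved from the runner/convert options instead of always falling back to the generic `TransformersModel`. A minimal usage sketch (assuming a build containing this commit, and that the pooling runner is auto-detected from the architecture suffix; pass `runner="pooling"` explicitly otherwise):

```python
from vllm import LLM

# Resolves to TransformersEmbeddingModel (runner="pooling", convert="embed")
llm = LLM(model="BAAI/bge-base-en-v1.5", model_impl="transformers")
embeddings = llm.embed(["The quick brown fox"])

# Resolves to TransformersForSequenceClassification
# (runner="pooling", convert="classify")
clf = LLM(model="papluca/xlm-roberta-base-language-detection",
          model_impl="transformers")
scores = clf.classify(["The quick brown fox"])
```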

File tree

7 files changed: +296 −138 lines

7 files changed

+296
-138
lines changed

tests/models/registry.py (2 additions, 1 deletion)

```diff
@@ -657,7 +657,8 @@ def check_available_online(
 }
 
 _TRANSFORMERS_BACKEND_MODELS = {
-    "TransformersModel": _HfExamplesInfo("Qwen/Qwen3-Embedding-0.6B"),
+    "TransformersEmbeddingModel": _HfExamplesInfo("BAAI/bge-base-en-v1.5", min_transformers_version="4.57.0.dev0"),  # noqa: E501
+    "TransformersForSequenceClassification": _HfExamplesInfo("papluca/xlm-roberta-base-language-detection", min_transformers_version="4.57.0.dev0"),  # noqa: E501
     "TransformersForCausalLM": _HfExamplesInfo("hmellor/Ilama-3.2-1B", trust_remote_code=True),  # noqa: E501
     "TransformersForMultimodalLM": _HfExamplesInfo("BAAI/Emu3-Chat-hf"),
 }
```

tests/models/test_transformers.py (41 additions, 58 deletions)

```diff
@@ -9,9 +9,16 @@
 
 from ..conftest import HfRunner, VllmRunner
 from ..utils import multi_gpu_test, prep_prompts
+from .registry import HF_EXAMPLE_MODELS
 from .utils import check_embeddings_close, check_logprobs_close
 
 
+def get_model(arch: str) -> str:
+    model_info = HF_EXAMPLE_MODELS.get_hf_info(arch)
+    model_info.check_transformers_version(on_fail="skip")
+    return model_info.default
+
+
 def check_implementation(
     runner_ref: type[Union[HfRunner, VllmRunner]],
     runner_test: type[VllmRunner],
@@ -170,71 +177,47 @@ def test_embed_loading(vllm_runner, model):
 
 
 @pytest.mark.parametrize(
-    "model",
-    [
-        # Encoder model
-        "BAAI/bge-base-en-v1.5",
-    ])
-def test_embed_correctness(hf_runner, vllm_runner, example_prompts, model):
-    import transformers
-    from packaging.version import Version
-    installed = Version(transformers.__version__)
-    required = Version("4.57.0.dev0")
-    if installed < required:
-        pytest.skip("Encoder models with the Transformers backend require "
-                    f"transformers>={required}, but got {installed}")
-
-    with vllm_runner(model, max_model_len=512,
-                     model_impl="transformers") as vllm_model:
+    "arch",
+    ["TransformersEmbeddingModel", "TransformersForSequenceClassification"])
+def test_pooling(hf_runner, vllm_runner, example_prompts, arch):
+    model = get_model(arch)
+
+    vllm_kwargs = dict(
+        max_model_len=None,
+        model_impl="transformers",
+        compilation_config=dict(cudagraph_capture_sizes=[8]),
+    )
+
+    hf_kwargs = dict()
+    if arch == "TransformersEmbeddingModel":
+        hf_kwargs["is_sentence_transformer"] = True
+    elif arch == "TransformersForSequenceClassification":
+        from transformers import AutoModelForSequenceClassification
+        hf_kwargs["auto_cls"] = AutoModelForSequenceClassification
+
+    # The example_prompts has ending "\n", for example:
+    # "Write a short story about a robot that dreams for the first time.\n"
+    # sentence_transformers will strip the input texts, see:
+    # https://github.com/UKPLab/sentence-transformers/blob/v3.1.1/sentence_transformers/models/Transformer.py#L159
+    # This makes the input_ids different between hf_model and vllm_model.
+    # So we need to strip the input texts to avoid test failing.
+    example_prompts = [str(s).strip() for s in example_prompts]
+
+    with (vllm_runner(model, **vllm_kwargs) as
+          vllm_model, hf_runner(model, **hf_kwargs) as hf_model):
         model_config = vllm_model.llm.llm_engine.model_config
         assert model_config.using_transformers_backend()
 
-        vllm_outputs = vllm_model.embed(example_prompts)
-
-    with hf_runner(model, is_sentence_transformer=True) as hf_model:
-        hf_outputs = hf_model.encode(example_prompts)
+        if arch == "TransformersEmbeddingModel":
+            vllm_outputs = vllm_model.embed(example_prompts)
+            hf_outputs = hf_model.encode(example_prompts)
+        elif arch == "TransformersForSequenceClassification":
+            vllm_outputs = vllm_model.classify(example_prompts)
+            hf_outputs = hf_model.classify(example_prompts)
 
     check_embeddings_close(
         embeddings_0_lst=hf_outputs,
         embeddings_1_lst=vllm_outputs,
         name_0="hf",
         name_1="vllm",
-        tol=1e-2,
     )
-
-
-@pytest.mark.parametrize(
-    "model",
-    ["jason9693/Qwen2.5-1.5B-apeach"],
-)
-@pytest.mark.parametrize("dtype", ["float"])
-def test_classify(
-    hf_runner,
-    vllm_runner,
-    example_prompts,
-    model: str,
-    dtype: str,
-) -> None:
-    import torch
-    from transformers import AutoModelForSequenceClassification
-
-    with vllm_runner(model,
-                     max_model_len=512,
-                     dtype=dtype,
-                     model_impl="transformers") as vllm_model:
-        model_config = vllm_model.llm.llm_engine.model_config
-        assert model_config.using_transformers_backend()
-
-        vllm_outputs = vllm_model.classify(example_prompts)
-
-    with hf_runner(model,
-                   dtype=dtype,
-                   auto_cls=AutoModelForSequenceClassification) as hf_model:
-        hf_outputs = hf_model.classify(example_prompts)
-
-    for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
-        hf_output = torch.tensor(hf_output)
-        vllm_output = torch.tensor(vllm_output)
-
-        assert torch.allclose(hf_output, vllm_output,
-                              1e-3 if dtype == "float" else 1e-2)
```
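
The prompt-stripping comment in `test_pooling` is easy to verify in isolation. A small sketch (model name taken from the registry entry above) of why trailing whitespace can desynchronize the two runners:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "papluca/xlm-roberta-base-language-detection")

# sentence-transformers strips its inputs before tokenizing, vLLM does not,
# so a trailing "\n" can yield different input_ids between the two runners.
with_newline = tokenizer("Write a short story.\n").input_ids
stripped = tokenizer("Write a short story.").input_ids
# May differ, depending on how the tokenizer treats trailing whitespace.
print(with_newline == stripped)
```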

vllm/config/model.py (27 additions, 6 deletions)

```diff
@@ -19,6 +19,7 @@
 from vllm.config.multimodal import (MMCacheType, MMEncoderTPMode,
                                     MultiModalConfig)
 from vllm.config.pooler import PoolerConfig
+from vllm.config.scheduler import RunnerType
 from vllm.config.utils import assert_hashable, config
 from vllm.logger import init_logger
 from vllm.platforms import current_platform
@@ -40,7 +41,6 @@
     import vllm.model_executor.models as me_models
     from vllm.config.load import LoadConfig
     from vllm.config.parallel import ParallelConfig
-    from vllm.config.scheduler import RunnerType
     from vllm.model_executor.layers.quantization import QuantizationMethods
     from vllm.v1.sample.logits_processor import LogitsProcessor
 else:
@@ -52,13 +52,12 @@
                            "vllm.model_executor.models")
     LoadConfig = Any
     ParallelConfig = Any
-    RunnerType = Any
     QuantizationMethods = Any
     LogitsProcessor = Any
 
 logger = init_logger(__name__)
 
-RunnerOption = Literal["auto", "generate", "pooling", "draft"]
+RunnerOption = Literal["auto", RunnerType]
 ConvertType = Literal["none", "embed", "classify", "reward"]
 ConvertOption = Literal["auto", ConvertType]
 TaskOption = Literal["auto", "generate", "embedding", "embed", "classify",
@@ -668,8 +667,28 @@ def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
 
     def _get_transformers_backend_cls(self) -> str:
         """Determine which Transformers backend class will be used if
         `model_impl` is set to `transformers` or `auto`."""
-        if getattr(self, "runner_type", self.runner) == "pooling":
-            return "TransformersModel"
+        # Check if the architecture we're wrapping has defaults
+        runner = None
+        convert = None
+        if defaults := try_match_architecture_defaults(self.architectures[0]):
+            _, (runner, convert) = defaults
+        # Overwrite with user-specified values
+        if self.runner != "auto":
+            runner = self.runner
+        if self.convert not in {"auto", "none"}:
+            convert = self.convert
+        # Fall back to default values if still not set
+        if runner is None:
+            runner = "generate"
+        if convert in {None, "none"}:
+            convert = "embed"
+        # Resolve Transformers backend pooling classes
+        if runner == "pooling":
+            if convert == "embed":
+                return "TransformersEmbeddingModel"
+            if convert == "classify":
+                return "TransformersForSequenceClassification"
+        # Resolve Transformers backend generate classes
         if self.hf_config != self.hf_text_config:
             # If 'hf_text_config' is the same as 'hf_config'. If not, it is
             # probably a composite config, i.e. multimodal
@@ -678,7 +697,9 @@ def _get_transformers_backend_cls(self) -> str:
 
     def using_transformers_backend(self) -> bool:
         """Check if the model is using the Transformers backend class."""
-        return self.architecture == self._get_transformers_backend_cls()
+        used_cls = self._model_info.architecture
+        transformers_backend_cls = self._get_transformers_backend_cls()
+        return used_cls == transformers_backend_cls
 
     @property
     def registry(self):
```
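
The new resolution logic is effectively a small decision table over `(runner, convert)`. A standalone sketch of the same flow (a hypothetical free function, simplified from `_get_transformers_backend_cls` above, assuming defaults and user overrides have already been applied):

```python
def transformers_backend_cls(runner: str, convert: str,
                             is_multimodal: bool) -> str:
    # Pooling classes are selected by the requested conversion.
    if runner == "pooling":
        if convert == "embed":
            return "TransformersEmbeddingModel"
        if convert == "classify":
            return "TransformersForSequenceClassification"
    # Generate classes: composite (multimodal) configs get the multimodal
    # wrapper, everything else the causal LM wrapper.
    return ("TransformersForMultimodalLM" if is_multimodal
            else "TransformersForCausalLM")
```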

vllm/config/utils.py (13 additions, 0 deletions)

```diff
@@ -4,6 +4,7 @@
 import ast
 import inspect
 import textwrap
+from collections.abc import Iterable
 from dataclasses import MISSING, Field, field, fields, is_dataclass, replace
 from typing import TYPE_CHECKING, Any, Protocol, TypeVar
 
@@ -52,6 +53,18 @@ def get_field(cls: ConfigType, name: str) -> Field:
         f"{cls.__name__}.{name} must have a default value or default factory.")
 
 
+def getattr_iter(object: object, names: Iterable[str], default: Any) -> Any:
+    """
+    A helper function that retrieves an attribute from an object which may
+    have multiple possible names. This is useful when fetching attributes from
+    arbitrary `transformers.PretrainedConfig` instances.
+    """
+    for name in names:
+        if hasattr(object, name):
+            return getattr(object, name)
+    return default
+
+
 def contains_object_print(text: str) -> bool:
     """
     Check if the text looks like a printed Python object, e.g.
```
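
`getattr_iter` exists because HF configs are not uniform about attribute names; for example, ALBERT-style configs factorize the embeddings and expose `embedding_size` separately from `hidden_size`. A quick usage sketch:

```python
from transformers import AutoConfig

from vllm.config.utils import getattr_iter

config = AutoConfig.from_pretrained("albert-base-v2")
# ALBERT has embedding_size=128 alongside hidden_size=768;
# the first name that exists on the config wins.
print(getattr_iter(config, ("embedding_size", "hidden_size"), None))  # 128
```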

vllm/model_executor/models/registry.py (3 additions, 2 deletions)

```diff
@@ -307,9 +307,10 @@
 }
 
 _TRANSFORMERS_BACKEND_MODELS = {
-    "TransformersModel": ("transformers", "TransformersModel"),
+    "TransformersEmbeddingModel": ("transformers_pooling", "TransformersEmbeddingModel"),  # noqa: E501
+    "TransformersForSequenceClassification": ("transformers_pooling", "TransformersForSequenceClassification"),  # noqa: E501
     "TransformersForCausalLM": ("transformers", "TransformersForCausalLM"),
-    "TransformersForMultimodalLM": ("transformers", "TransformersForMultimodalLM"), # noqa: E501
+    "TransformersForMultimodalLM": ("transformers", "TransformersForMultimodalLM"),  # noqa: E501
 }
 # yapf: enable
```
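
The registry values are `(module, class)` pairs so that the heavyweight model modules are only imported on demand; the new entries point at a separate `transformers_pooling` module. A simplified sketch of how such an entry can be resolved (not vLLM's actual loader):

```python
from importlib import import_module


def resolve_model_cls(entry: tuple[str, str]) -> type:
    module_name, class_name = entry
    # Model modules live under vllm.model_executor.models and are only
    # imported when the architecture is actually requested.
    module = import_module(f"vllm.model_executor.models.{module_name}")
    return getattr(module, class_name)


cls = resolve_model_cls(("transformers_pooling", "TransformersEmbeddingModel"))
```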

vllm/model_executor/models/transformers.py (10 additions, 71 deletions)

```diff
@@ -31,6 +31,7 @@
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                          ParallelConfig, VllmConfig)
+from vllm.config.utils import getattr_iter
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.distributed.utils import get_pp_indices
 from vllm.logger import init_logger
@@ -486,10 +487,13 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
 
         # Input embeddings
         if not isinstance(self.model.get_input_embeddings(), PPMissingLayer):
+            names = ("embedding_size", "hidden_size")
+            embedding_dim = getattr_iter(self.text_config, names, None)
+            assert embedding_dim is not None
             self.model.set_input_embeddings(
                 VocabParallelEmbedding(
                     self.text_config.vocab_size,
-                    self.text_config.hidden_size,
+                    embedding_dim=embedding_dim,
                     org_num_embeddings=self.text_config.vocab_size,
                     quant_config=self.quant_config,
                 ))
@@ -645,7 +649,9 @@ def create_attention_instances(
                 attn_type=attn_type)
         return attention_instances
 
-    def init_parameters(self, module: nn.Module):
+    def init_parameters(self,
+                        module: nn.Module,
+                        dtype: Optional[torch.dtype] = None):
         """
         If a `parameter` is on the `meta` device, then its parent
         `module` is the original module created by:
@@ -659,11 +665,11 @@ def init_parameters(self, module: nn.Module):
             if param.device == torch.device("meta"):
                 new_param = nn.Parameter(
                     torch.empty_like(param.data,
-                                     dtype=self.model_config.dtype,
+                                     dtype=dtype or self.model_config.dtype,
                                      device=self.device_config.device))
                 setattr(module, name, new_param)
         for child in module.children():
-            self.init_parameters(child)
+            self.init_parameters(child, dtype)
 
     def forward(
         self,
@@ -712,73 +718,6 @@ def load_weights(self, weights: Iterable[tuple[str,
         return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
 
 
-@support_torch_compile(enable_if=can_enable_torch_compile)
-class TransformersModel(TransformersBase):
-    hf_to_vllm_mapper = WeightsMapper(
-        orig_to_new_prefix={
-            # Handle BERT-like models
-            "bert": "model",
-            # Add `model.` prefix for base model checkpoints
-            "": "model.",
-            # Remove `model.` prefix if it was already there
-            "model.model.": "model.",
-            # Pooling adapters will be adjacent to `model`
-            "model.pooler": "pooler",
-            "model.score": "score",
-            # Classifier adapter's classifier layer is renamed to score
-            "model.classifier": "score",
-        },
-        orig_to_new_suffix={
-            # Replace legacy suffixes used for norms
-            ".gamma": ".weight",
-            ".beta": ".bias",
-        })
-
-    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
-        super().__init__(vllm_config=vllm_config, prefix=prefix)
-
-        # After creating a pooling model, `pooler` will be duplicated.
-        # The one inside `model` comes from the Transformers modelling code.
-        # The one after `model` is an adapter from vLLM.
-        # We want to use the adapter so we nullify the original pooler.
-        if getattr(self.model, "pooler", None) is not None:
-            self.skip_prefixes.append("pooler.")
-            self.model.pooler = torch.nn.Identity()
-
-        # Some encoder models have the position_ids buffer in the checkpoint.
-        # vLLM will always pass position_ids as an argument, so we skip loading
-        # the buffer if it exists
-        self.skip_substrs.append("position_ids")
-
-        # Some encoder models have the bias of the final classifier layer
-        # in the checkpoint. vLLM does not use this bias, so we skip loading
-        # it if it exists
-        self.skip_substrs.append("score.bias")
-
-    def create_attention_instances(
-            self, attn_type: AttentionType = AttentionType.DECODER):
-        # TODO(hmellor): Better way to detect encoder models
-        # In encoder models, the attention layers will have `is_causal=False`
-        is_encoder = lambda m: not getattr(m, "is_causal", True)
-        # vLLM does not support encoder-decoder models, so if any encoder layer
-        # is found, we assume the whole model is an encoder model
-        if any(is_encoder(m) for m in self.model.modules()):
-            attn_type = AttentionType.ENCODER_ONLY
-
-        # Check minimum transformers version for encoder models support
-        if attn_type == AttentionType.ENCODER_ONLY:
-            import transformers
-            from packaging.version import Version
-            installed = Version(transformers.__version__)
-            required = Version("4.57.0.dev0")
-            if installed < required:
-                raise ValueError(
-                    "Encoder models with the Transformers backend require "
-                    f"transformers>={required}, but got {installed}")
-
-        return super().create_attention_instances(attn_type)
-
-
 @support_torch_compile(enable_if=can_enable_torch_compile)
 class TransformersForCausalLM(TransformersBase):
```
