Skip to content

Commit 61aedb5

Browse files
authored
MoveVllmConfig from config/__init__.py to config/vllm.py (vllm-project#25271)
Signed-off-by: Harry Mellor <[email protected]>
1 parent d3bd171 commit 61aedb5

36 files changed

+964
-905
lines changed

vllm/attention/layer.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -20,8 +20,7 @@
2020
from vllm.logger import init_logger
2121
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
2222
from vllm.model_executor.layers.linear import UnquantizedLinearMethod
23-
from vllm.model_executor.layers.quantization.base_config import (
24-
QuantizationConfig)
23+
from vllm.model_executor.layers.quantization import QuantizationConfig
2524
from vllm.model_executor.layers.quantization.input_quant_fp8 import QuantFP8
2625
from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod
2726
from vllm.model_executor.layers.quantization.utils.quant_utils import (

vllm/attention/layers/chunked_local_attention.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -9,7 +9,8 @@
99
from vllm.attention.backends.abstract import (AttentionBackend,
1010
AttentionMetadata)
1111
from vllm.attention.selector import get_attn_backend
12-
from vllm.config import CacheConfig, QuantizationConfig
12+
from vllm.config import CacheConfig
13+
from vllm.model_executor.layers.quantization import QuantizationConfig
1314
from vllm.v1.attention.backends.utils import (
1415
CommonAttentionMetadata, make_local_attention_virtual_batches,
1516
subclass_attention_backend)

vllm/config/__init__.py

Lines changed: 79 additions & 826 deletions
Large diffs are not rendered by default.

vllm/config/utils.py

Lines changed: 36 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1,21 +1,21 @@
11
# SPDX-License-Identifier: Apache-2.0
22
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3-
3+
"""Utility functions for vLLM config dataclasses."""
44
import ast
55
import inspect
66
import textwrap
7-
from dataclasses import MISSING, Field, field, fields, is_dataclass
8-
from typing import TYPE_CHECKING, Any, TypeVar
7+
from dataclasses import MISSING, Field, field, fields, is_dataclass, replace
8+
from typing import TYPE_CHECKING, Any, Protocol, TypeVar
99

1010
import regex as re
11+
from typing_extensions import runtime_checkable
1112

1213
if TYPE_CHECKING:
1314
from _typeshed import DataclassInstance
14-
15-
ConfigType = type[DataclassInstance]
1615
else:
17-
ConfigType = type
16+
DataclassInstance = Any
1817

18+
ConfigType = type[DataclassInstance]
1919
ConfigT = TypeVar("ConfigT", bound=ConfigType)
2020

2121

@@ -143,3 +143,33 @@ def pairwise(iterable):
143143

144144
def is_init_field(cls: ConfigType, name: str) -> bool:
145145
return next(f for f in fields(cls) if f.name == name).init
146+
147+
148+
@runtime_checkable
149+
class SupportsHash(Protocol):
150+
151+
def compute_hash(self) -> str:
152+
...
153+
154+
155+
class SupportsMetricsInfo(Protocol):
156+
157+
def metrics_info(self) -> dict[str, str]:
158+
...
159+
160+
161+
def update_config(config: ConfigT, overrides: dict[str, Any]) -> ConfigT:
162+
processed_overrides = {}
163+
for field_name, value in overrides.items():
164+
assert hasattr(
165+
config, field_name), f"{type(config)} has no field `{field_name}`"
166+
current_value = getattr(config, field_name)
167+
if is_dataclass(current_value) and not is_dataclass(value):
168+
assert isinstance(value, dict), (
169+
f"Overrides to {type(config)}.{field_name} must be a dict"
170+
f" or {type(current_value)}, but got {type(value)}")
171+
value = update_config(
172+
current_value, # type: ignore[type-var]
173+
value)
174+
processed_overrides[field_name] = value
175+
return replace(config, **processed_overrides)

0 commit comments

Comments
 (0)