Skip to content

Commit 609b533

Browse files
authored
[Bugfix] Add proper comparison for package versions (#22314)
Signed-off-by: Syed Muhammad Bin Asif <[email protected]>
1 parent 5e9455a commit 609b533

File tree

13 files changed: +40 additions, −16 deletions

benchmarks/kernels/benchmark_bitblas.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,16 @@
33
# Copyright (c) Microsoft Corporation.
44
# Licensed under the MIT License.
55

6+
from packaging import version
7+
68
from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
79
MINIMUM_BITBLAS_VERSION,
810
)
911

1012
try:
1113
import bitblas
1214

13-
if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
15+
if version.parse(bitblas.__version__) < version.parse(MINIMUM_BITBLAS_VERSION):
1416
raise ImportError(
1517
"bitblas version is wrong. Please "
1618
f"install bitblas>={MINIMUM_BITBLAS_VERSION}"

docs/design/arch_overview.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,8 @@ vision-language model.
200200
lora_config = vllm_config.lora_config
201201
super().__init__(config, cache_config, quant_config, lora_config, prefix)
202202

203-
if __version__ >= "0.6.4":
203+
from packaging import version
204+
if version.parse(__version__) >= version.parse("0.6.4"):
204205
MyModel = MyNewModel
205206
else:
206207
MyModel = MyOldModel

vllm/attention/ops/triton_decode_attention.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@
3131

3232
import logging
3333

34+
from packaging import version
35+
3436
from vllm.platforms import current_platform
3537
from vllm.triton_utils import tl, triton
3638

@@ -40,7 +42,7 @@
4042

4143
# Only print the following warnings when triton version < 3.2.0.
4244
# The issue won't affect performance or accuracy.
43-
if triton.__version__ < '3.2.0':
45+
if version.parse(triton.__version__) < version.parse('3.2.0'):
4446
logger.warning(
4547
"The following error message 'operation scheduled before its operands' "
4648
"can be ignored.")

vllm/model_executor/layers/quantization/bitblas.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from typing import Any, Optional
44

55
import torch
6+
from packaging import version
67

78
from vllm.logger import init_logger
89
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
@@ -45,7 +46,8 @@ def __init__(
4546
) -> None:
4647
try:
4748
import bitblas
48-
if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
49+
if version.parse(bitblas.__version__) < version.parse(
50+
MINIMUM_BITBLAS_VERSION):
4951
raise ImportError(
5052
"bitblas version is wrong. Please "
5153
f"install bitblas>={MINIMUM_BITBLAS_VERSION}")

vllm/model_executor/layers/quantization/bitsandbytes.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from typing import Any, Callable, Optional, Union
55

66
import torch
7+
from packaging import version
78

89
from vllm.model_executor.layers.fused_moe.layer import (FusedMoE,
910
FusedMoEMethodBase)
@@ -169,7 +170,8 @@ class BitsAndBytesLinearMethod(LinearMethodBase):
169170
def __init__(self, quant_config: BitsAndBytesConfig):
170171
try:
171172
import bitsandbytes
172-
if bitsandbytes.__version__ < "0.46.1":
173+
if version.parse(
174+
bitsandbytes.__version__) < version.parse("0.46.1"):
173175
raise ImportError("bitsandbytes version is wrong. Please "
174176
"install bitsandbytes>=0.46.1.")
175177
except ImportError as err:
@@ -412,7 +414,8 @@ class BitsAndBytesMoEMethod(FusedMoEMethodBase):
412414
def __init__(self, quant_config: BitsAndBytesConfig):
413415
try:
414416
import bitsandbytes
415-
if bitsandbytes.__version__ < "0.46.1":
417+
if version.parse(
418+
bitsandbytes.__version__) < version.parse("0.46.1"):
416419
raise ImportError("bitsandbytes version is wrong. Please "
417420
"install bitsandbytes>=0.46.1.")
418421
except ImportError as err:

vllm/model_executor/layers/quantization/deepspeedfp.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
import torch
77
import torch.nn as nn
88
import torch.nn.functional as F
9+
from packaging import version
910

1011
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
1112
from vllm.model_executor.layers.quantization import QuantizationMethods
@@ -145,7 +146,7 @@ def __new__(cls, orig_shape: torch.Size, params_dtype: torch.dtype,
145146
quant_config: DeepSpeedFPConfig):
146147
try:
147148
import deepspeed
148-
if deepspeed.__version__ < "0.14.2":
149+
if version.parse(deepspeed.__version__) < version.parse("0.14.2"):
149150
raise ImportError("deepspeed version is wrong. Please "
150151
"install deepspeed>=0.14.2.")
151152
from deepspeed.ops.fp_quantizer import FP_Quantize

vllm/model_executor/layers/quantization/gptq_bitblas.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from typing import Any, Optional
44

55
import torch
6+
from packaging import version
67
from torch.nn.parameter import Parameter
78

89
from vllm.logger import init_logger
@@ -63,7 +64,8 @@ def __init__(
6364

6465
try:
6566
import bitblas
66-
if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
67+
if version.parse(bitblas.__version__) < version.parse(
68+
MINIMUM_BITBLAS_VERSION):
6769
raise ImportError(
6870
"bitblas version is wrong. Please "
6971
f"install bitblas>={MINIMUM_BITBLAS_VERSION}")

vllm/model_executor/layers/quantization/ipex_quant.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from typing import Any, Optional
55

66
import torch
7+
from packaging import version
78

89
from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
910
UnquantizedLinearMethod)
@@ -135,7 +136,8 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
135136

136137
try:
137138
import intel_extension_for_pytorch as ipex
138-
if ipex.__version__ < MIN_IPEX_VERSION:
139+
if version.parse(
140+
ipex.__version__) < version.parse(MIN_IPEX_VERSION):
139141
raise ImportError(
140142
"intel_extension_for_pytorch version is "
141143
"wrong. Please install "
@@ -199,7 +201,8 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
199201

200202
try:
201203
import intel_extension_for_pytorch as ipex
202-
if ipex.__version__ < MIN_IPEX_VERSION:
204+
if version.parse(
205+
ipex.__version__) < version.parse(MIN_IPEX_VERSION):
203206
raise ImportError(
204207
"intel_extension_for_pytorch version is "
205208
"wrong. Please install "

vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from typing import Optional
55

66
import torch
7+
from packaging import version
78

89
from vllm.logger import init_logger
910
from vllm.model_executor.layers.quantization.base_config import (
@@ -110,7 +111,8 @@ def can_implement(cls,
110111

111112
try:
112113
import bitblas
113-
if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
114+
if version.parse(bitblas.__version__) < version.parse(
115+
MINIMUM_BITBLAS_VERSION):
114116
raise ImportError(
115117
"bitblas version is wrong. Please "
116118
f"install bitblas>={MINIMUM_BITBLAS_VERSION}")

vllm/model_executor/layers/quantization/utils/bitblas_utils.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from typing import Optional
44

55
import torch
6+
from packaging import version
67

78
from vllm.platforms import current_platform
89
from vllm.scalar_type import ScalarType, scalar_types
@@ -75,7 +76,8 @@ def _check_bitblas_supported(
7576
# Finally, check if bitblas is installed
7677
try:
7778
import bitblas
78-
if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
79+
if version.parse(
80+
bitblas.__version__) < version.parse(MINIMUM_BITBLAS_VERSION):
7981
raise ImportError("bitblas version is wrong. Please "
8082
f"install bitblas>={MINIMUM_BITBLAS_VERSION}")
8183
except ImportError:

0 commit comments

Comments (0)