Skip to content

Commit 0c275ad

Browse files
authored
[V0 Deprecation][TPU] Remove V1 flag check from tests (#22248)
Signed-off-by: NickLucche <[email protected]>
1 parent 74333ae commit 0c275ad

File tree

3 files changed

+1
-21
lines changed

3 files changed

+1
-21
lines changed

tests/v1/tpu/test_mha_attn.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,10 @@
1212
import torch_xla.core
1313
import torch_xla.core.xla_model
1414

15-
from vllm import envs
1615
from vllm.attention.layer import MultiHeadAttention
1716
from vllm.attention.selector import _cached_get_attn_backend
1817
from vllm.platforms import current_platform
1918

20-
if not envs.VLLM_USE_V1:
21-
pytest.skip(
22-
"Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
23-
allow_module_level=True,
24-
)
25-
2619

2720
@pytest.fixture(autouse=True)
2821
def clear_cache():

tests/v1/tpu/test_multimodal.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,19 +4,12 @@
44
import openai
55
import pytest
66

7-
from vllm import envs
87
from vllm.multimodal.utils import encode_image_base64, fetch_image
98
from vllm.platforms import current_platform
109

1110
from ...entrypoints.openai.test_vision import TEST_IMAGE_URLS
1211
from ...utils import RemoteOpenAIServer
1312

14-
if not envs.VLLM_USE_V1:
15-
pytest.skip(
16-
"Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
17-
allow_module_level=True,
18-
)
19-
2013

2114
@pytest.fixture(scope="session")
2215
def base64_encoded_image() -> dict[str, str]:

tests/v1/tpu/test_sampler.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,10 @@
44

55
import pytest
66

7-
from vllm import LLM, envs
7+
from vllm import LLM
88
from vllm.platforms import current_platform
99
from vllm.sampling_params import SamplingParams
1010

11-
if not envs.VLLM_USE_V1:
12-
pytest.skip(
13-
"Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
14-
allow_module_level=True,
15-
)
16-
1711

1812
@pytest.mark.parametrize("model_name", ["Qwen/Qwen2.5-1.5B-Instruct"])
1913
@pytest.mark.skipif(not current_platform.is_tpu(),

0 commit comments

Comments (0)