3 files changed (+1, −21 lines)

File 1:
```diff
@@ -12,17 +12,10 @@
 import torch_xla.core
 import torch_xla.core.xla_model
 
-from vllm import envs
 from vllm.attention.layer import MultiHeadAttention
 from vllm.attention.selector import _cached_get_attn_backend
 from vllm.platforms import current_platform
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(autouse=True)
 def clear_cache():
```
File 2:
```diff
@@ -4,19 +4,12 @@
 import openai
 import pytest
 
-from vllm import envs
 from vllm.multimodal.utils import encode_image_base64, fetch_image
 from vllm.platforms import current_platform
 
 from ...entrypoints.openai.test_vision import TEST_IMAGE_URLS
 from ...utils import RemoteOpenAIServer
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(scope="session")
 def base64_encoded_image() -> dict[str, str]:
```
File 3:
```diff
@@ -4,16 +4,10 @@
 
 import pytest
 
-from vllm import LLM, envs
+from vllm import LLM
 from vllm.platforms import current_platform
 from vllm.sampling_params import SamplingParams
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.mark.parametrize("model_name", ["Qwen/Qwen2.5-1.5B-Instruct"])
 @pytest.mark.skipif(not current_platform.is_tpu(),
```
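For context, the guard deleted in each file is pytest's module-level skip pattern: calling `pytest.skip(..., allow_module_level=True)` at import time skips every test in the module at collection. Below is a minimal, self-contained sketch of that pattern; the `MY_FEATURE_FLAG` environment variable and `test_placeholder` are hypothetical stand-ins, not part of this PR:

```python
import os

import pytest

# Module-level guard: if the feature flag is not set, skip the entire
# module at collection time. allow_module_level=True is required because
# pytest.skip() is being called outside of a test function.
if os.environ.get("MY_FEATURE_FLAG") != "1":  # hypothetical flag
    pytest.skip(
        "Feature flag not set. Rerun with `MY_FEATURE_FLAG=1` to test.",
        allow_module_level=True,
    )


def test_placeholder():
    assert True
```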