Skip to content

Commit 8f6d26c

Browse files
committed
Fix integration test
1 parent 271602b commit 8f6d26c

File tree

2 files changed

+1
-4
lines changed

2 files changed

+1
-4
lines changed

tests/integration/llm/prepare.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -433,8 +433,6 @@
433433
"option.model_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
434434
"option.tensor_parallel_degree": 1,
435435
"option.max_rolling_batch_size": 4,
436-
"option.enable_reasoning": True,
437-
"option.reasoning_parser": "deepseek_r1",
438436
},
439437
"tinyllama-input-len-exceeded": {
440438
"option.model_id": "s3://djl-llm/tinyllama-1.1b-chat/",

tests/integration/tests.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -790,8 +790,7 @@ class TestVllm_p4d:
790790
def test_qwen3_vl_32b_instruct(self):
791791
with Runner('lmi', 'qwen3-vl-32b-instruct') as r:
792792
prepare.build_vllm_async_model("qwen3-vl-32b-instruct")
793-
env = ["VLLM_ATTENTION_BACKEND=TORCH_SDPA"]
794-
r.launch(env_vars=env)
793+
r.launch()
795794
client.run("multimodal qwen3-vl-32b-instruct".split())
796795

797796
def test_llama_4_scout_17b_16e_instruct(self):

0 commit comments

Comments (0)