Skip to content
This repository was archived by the owner on Sep 4, 2025. It is now read-only.

Commit c35e4a3

Browse files
authored
[BugFix] Fix test_phi3v.py (vllm-project#5725)
1 parent 1f56742 commit c35e4a3

File tree

2 files changed

+9
-5
lines changed

2 files changed

+9
-5
lines changed

tests/conftest.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -233,11 +233,13 @@ def generate_greedy(
233233
prompts: List[str],
234234
max_tokens: int,
235235
images: Optional[List[Image.Image]] = None,
236+
**kwargs,
236237
) -> List[Tuple[List[int], str]]:
237238
outputs = self.generate(prompts,
238239
do_sample=False,
239240
max_new_tokens=max_tokens,
240-
images=images)
241+
images=images,
242+
**kwargs)
241243

242244
return [(output_ids[0], output_str[0])
243245
for output_ids, output_str in outputs]

tests/models/test_phi3v.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str],
7777
# numeric difference for longer context and test can't pass
7878
@pytest.mark.parametrize("model_and_config", model_and_vl_config)
7979
@pytest.mark.parametrize("dtype", [target_dtype])
80-
@pytest.mark.parametrize("max_tokens", [8])
80+
@pytest.mark.parametrize("max_tokens", [128])
8181
def test_models(hf_runner, vllm_runner, hf_images, vllm_images,
8282
model_and_config, dtype: str, max_tokens: int) -> None:
8383
"""Inference result should be the same between hf and vllm.
@@ -95,9 +95,11 @@ def test_models(hf_runner, vllm_runner, hf_images, vllm_images,
9595
hf_model_kwargs = {"_attn_implementation": "eager"}
9696
with hf_runner(model_id, dtype=dtype,
9797
model_kwargs=hf_model_kwargs) as hf_model:
98-
hf_outputs = hf_model.generate_greedy(HF_IMAGE_PROMPTS,
99-
max_tokens,
100-
images=hf_images)
98+
hf_outputs = hf_model.generate_greedy(
99+
HF_IMAGE_PROMPTS,
100+
max_tokens,
101+
images=hf_images,
102+
eos_token_id=hf_model.processor.tokenizer.eos_token_id)
101103

102104
vllm_image_prompts = [
103105
p.replace("<|image_1|>",

0 commit comments

Comments (0)