Skip to content

Commit 3dd5070

Browse files
[CI/Build] Cleanup VLM tests (#6107)
1 parent: 0ed646b · commit: 3dd5070

File tree

4 files changed

+5
-8
lines changed

4 files changed

+5
-8
lines changed

tests/models/test_llava_next.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import re
21
from typing import List, Optional, Tuple
32

43
import pytest
@@ -36,17 +35,15 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
3635
output_ids, output_str, out_logprobs = vllm_output
3736

3837
tokenizer = AutoTokenizer.from_pretrained(model)
39-
image_token_str = tokenizer.decode(IMAGE_TOKEN_ID)
4038
eos_token_id = tokenizer.eos_token_id
4139

4240
hf_output_ids = [
4341
token_id for idx, token_id in enumerate(output_ids)
4442
if token_id != IMAGE_TOKEN_ID or output_ids[idx - 1] != IMAGE_TOKEN_ID
4543
]
4644

47-
hf_output_str = re.sub(fr"({image_token_str})+", "", output_str)
48-
assert hf_output_str[0] == " "
49-
hf_output_str = hf_output_str[1:]
45+
assert output_str[0] == " "
46+
hf_output_str = output_str[1:]
5047
if hf_output_ids[-1] == eos_token_id:
5148
hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)
5249

tests/models/test_phi3v.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
3535
assert output_str_without_image[0] == " "
3636
output_str_without_image = output_str_without_image[1:]
3737

38-
hf_output_str = output_str_without_image.replace("<|user|>", "") \
39-
.replace("<|end|>\n<|assistant|>", " ")
38+
hf_output_str = output_str_without_image + "<|end|><|endoftext|>"
4039

4140
tokenizer = AutoTokenizer.from_pretrained(model)
4241
hf_output_ids = tokenizer.encode(output_str_without_image)

tests/models/utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@ def check_logprobs_close(
7777
# Each predicted token must be in top N logprobs of the other
7878
fail_msg = (
7979
f"Test{prompt_idx}:"
80+
f"\nMatched tokens:\t{output_ids_0[:idx]}"
8081
f"\n{name_0}:\t{output_str_0!r}\t{logprobs_elem_0}"
8182
f"\n{name_1}:\t{output_str_1!r}\t{logprobs_elem_1}")
8283

vllm/multimodal/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def _default_input_mapper(self, ctx: InputContext,
115115
if isinstance(data, Image.Image):
116116
image_processor = self._get_hf_image_processor(model_config)
117117
if image_processor is None:
118-
raise RuntimeError("No HuggingFace processor is available"
118+
raise RuntimeError("No HuggingFace processor is available "
119119
"to process the image object")
120120
try:
121121
batch_data = image_processor \

0 commit comments

Comments (0)