
Commit e56bf27

[Bugfix] Fix InternVL2 inference with various num_patches (vllm-project#8375)

InternVL2 tiles each input image into a number of patches that depends on the image's size, so the per-image pixel-value tensors can be ragged and cannot simply be stacked into one batch tensor. This commit keeps them as a list in the input mapper, flattens them with two flatten_bn calls during input validation, and adds a regression test covering batched prompts and multi-image prompts with differing image sizes.

Co-authored-by: DarkLight1337 <[email protected]>
Parent: 520ca38

File tree: 2 files changed (+39, −3)

  tests/models/test_internvl.py
  vllm/model_executor/models/internvl.py

tests/models/test_internvl.py

Lines changed: 35 additions & 0 deletions
@@ -331,6 +331,41 @@ def test_multi_images_models(hf_runner, vllm_runner, image_assets, model,
     )
 
 
+@pytest.mark.parametrize("model", ["OpenGVLab/InternVL2-2B"])
+@pytest.mark.parametrize("size_factors", [[0.5, 1.0]])
+@pytest.mark.parametrize("dtype", [target_dtype])
+@pytest.mark.parametrize("max_tokens", [128])
+@pytest.mark.parametrize("num_logprobs", [5])
+@torch.inference_mode()
+def test_different_num_patches(hf_runner, vllm_runner, image_assets, model,
+                               size_factors, dtype: str, max_tokens: int,
+                               num_logprobs: int) -> None:
+    images = [asset.pil_image.resize((896, 896)) for asset in image_assets]
+
+    inputs_batching = [(
+        [prompt for _ in size_factors],
+        [rescale_image_size(image, factor) for factor in size_factors],
+    ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]
+
+    inputs_multi_images = [
+        ([HF_MULTIIMAGE_IMAGE_PROMPT for _ in size_factors],
+         [[rescale_image_size(image, factor) for image in images]
+          for factor in size_factors])
+    ]
+    for inputs in [inputs_batching, inputs_multi_images]:
+        run_test(
+            hf_runner,
+            vllm_runner,
+            inputs,
+            model,
+            dtype=dtype,
+            max_tokens=max_tokens,
+            num_logprobs=num_logprobs,
+            mm_limit=2,
+            tensor_parallel_size=1,
+        )
+
+
 @pytest.mark.parametrize(
     "models", [("OpenGVLab/InternVL2-2B", "OpenGVLab/InternVL2-2B-AWQ")])
 @pytest.mark.parametrize(
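
Assuming a standard vLLM development checkout with the test dependencies installed, the new case can be run on its own with something like:

    pytest tests/models/test_internvl.py -k test_different_num_patches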

vllm/model_executor/models/internvl.py

Lines changed: 4 additions & 3 deletions
@@ -270,14 +270,14 @@ def input_mapper_for_internvl(ctx: InputContext, data: object):
         # Add an N dimension for number of images per prompt (currently 1).
         data = data.unsqueeze(0)
     elif is_list_of(data, Image.Image):
+        # we can't stack here because the images may have different num_patches
         data = [
             image_to_pixel_values(img,
                                   image_size,
                                   min_num,
                                   max_num,
                                   use_thumbnail=use_thumbnail) for img in data
         ]
-        data = torch.stack(data)
     model_config = ctx.model_config
     tokenizer = cached_get_tokenizer(model_config.tokenizer,
                                      trust_remote_code=True)
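
The removed torch.stack call was the crux of the bug: InternVL2 tiles each image into a size-dependent number of patches, so the per-image tensors are ragged along the patch dimension and cannot be stacked. A minimal standalone sketch of the failure mode (illustrative shapes, not vLLM code):

    import torch

    # Two images tiled into different numbers of 448x448 patches.
    pixel_values = [
        torch.randn(7, 3, 448, 448),   # smaller image -> 7 patches
        torch.randn(13, 3, 448, 448),  # larger image -> 13 patches
    ]

    try:
        # torch.stack requires every tensor to have exactly the same shape.
        torch.stack(pixel_values)
    except RuntimeError as exc:
        print(exc)  # "stack expects each tensor to be equal size, ..."

Keeping the data as a plain Python list sidesteps the error and defers the flattening to input validation, as the next hunk shows.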
@@ -449,11 +449,12 @@ def _parse_and_validate_image_input(
         if not isinstance(pixel_values, (torch.Tensor, list)):
             raise ValueError("Incorrect type of pixel values. "
                              f"Got type: {type(pixel_values)}")
-
+        # We need to flatten (B, N, P) to (B*N*P),
+        # so we call flatten_bn twice.
         return InternVLImagePixelInputs(
             type="pixel_values",
             data=self._validate_pixel_values(
-                flatten_bn(pixel_values, concat=True).flatten(0, 1)),
+                flatten_bn(flatten_bn(pixel_values), concat=True)),
         )
 
         raise AssertionError("This line should be unreachable.")
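
The validation-side change replaces flatten_bn(pixel_values, concat=True).flatten(0, 1), which assumed a dense (B, N, P, ...) tensor, with two flatten_bn calls that also handle nested lists of ragged tensors. A rough sketch of the idea, using a hypothetical simplified helper rather than vLLM's actual flatten_bn:

    import torch

    def flatten_once(batch: list) -> list:
        # First flatten_bn call: merge the batch (B) and per-prompt
        # image (N) levels into one flat list of per-image tensors.
        flat = []
        for item in batch:
            flat.extend(item if isinstance(item, list) else [item])
        return flat

    # Batch of 2 prompts: one single-image, one two-image, where each
    # image contributes a different number of patches.
    batch = [
        [torch.randn(7, 3, 448, 448)],
        [torch.randn(4, 3, 448, 448), torch.randn(13, 3, 448, 448)],
    ]

    per_image = flatten_once(batch)
    # Second flatten_bn call with concat=True: concatenate the ragged
    # patch dimension instead of stacking it.
    pixel_values = torch.cat(per_image, dim=0)
    print(pixel_values.shape)  # torch.Size([24, 3, 448, 448])

Because concatenation, unlike stacking, only requires the trailing dimensions to match, images with different num_patches end up in a single (B*N*P, 3, H, W) tensor, the flat per-patch layout the rest of the model consumes.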
