Commit a554991

Migrate LlavaNextVideoPixelInputs to TensorSchema (#21843)
Signed-off-by: Benji Beck <[email protected]>
1 parent d1af8b7 commit a554991

1 file changed (+22 −35 lines)


vllm/model_executor/models/llava_next_video.py

Lines changed: 22 additions & 35 deletions
@@ -3,7 +3,7 @@
 
 import math
 from collections.abc import Iterable, Mapping, Sequence
-from typing import Literal, Optional, TypedDict, Union
+from typing import Annotated, Literal, Optional, Union
 
 import torch
 import torch.nn as nn
@@ -25,6 +25,7 @@
 from vllm.multimodal.profiling import BaseDummyInputsBuilder
 from vllm.sequence import IntermediateTensors
 from vllm.utils import is_list_of
+from vllm.utils.tensor_schema import TensorSchema, TensorShape
 
 from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
 from .llava import init_vision_tower_for_llava
@@ -35,17 +36,25 @@
 from .vision import get_vision_encoder_info
 
 
-class LlavaNextVideoPixelInputs(TypedDict):
-    type: Literal["pixel_values_videos"]
-    data: Union[torch.Tensor, list[torch.Tensor]]
-    """
-    Shape: `(batch_size, num_frames, num_channels, height, width)`
+class LlavaNextVideoPixelInputs(TensorSchema):
+    """
+    Dimensions:
+        - bs: Batch size
+        - nv: Number of videos
+        - nf: Number of frames
+        - nc: Number of channels (3)
+        - h: Height of each frame
+        - w: Width of each frame
 
     Note that `num_frames` may be different for each batch, in which case
     the data is passed as a list instead of a batched tensor.
 
     Note that it only supports one video input for one batch.
     """
+    type: Literal["pixel_values_videos"] = "pixel_values_videos"
+
+    data: Annotated[Union[torch.Tensor, list[torch.Tensor]],
+                    TensorShape("bs", "nv", "nf", 3, "h", "w")]
 
 
 class LlavaNextVideoProcessingInfo(BaseProcessingInfo):
@@ -320,27 +329,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
         self.make_empty_intermediate_tensors = (
             self.language_model.model.make_empty_intermediate_tensors)
 
-    def _validate_video_pixel_values(
-        self, data: Union[torch.Tensor, list[torch.Tensor]]
-    ) -> Union[torch.Tensor, list[torch.Tensor]]:
-
-        h = w = self.config.vision_config.image_size
-        expected_dims = (3, h, w)
-
-        def _validate_shape(d: torch.Tensor):
-            actual_dims = tuple(d.shape[2:])
-
-            if actual_dims != expected_dims:
-                expected_expr = ("num_frames", *map(str, expected_dims))
-                raise ValueError(
-                    "The expected shape of pixel values in each video frame "
-                    f"is {expected_expr}. You supplied {tuple(d.shape)}.")
-
-        for d in data:
-            _validate_shape(d)
-
-        return data
-
     def _parse_and_validate_video_input(
         self, **kwargs: object) -> Optional[LlavaNextVideoPixelInputs]:
         """
@@ -355,14 +343,13 @@ def _parse_and_validate_video_input(
         if pixel_values_videos is None:
             return None
 
-        if not isinstance(pixel_values_videos, (torch.Tensor, list)):
-            raise ValueError("Incorrect type of pixel_values_videos. "
-                             f"Got type: {type(pixel_values_videos)}")
-
-        return LlavaNextVideoPixelInputs(
-            type="pixel_values_videos",
-            data=pixel_values_videos,
-        )
+        expected_h = expected_w = self.config.vision_config.image_size
+        return LlavaNextVideoPixelInputs(type="pixel_values_videos",
+                                         data=pixel_values_videos,
+                                         resolve_bindings={
+                                             "h": expected_h,
+                                             "w": expected_w,
+                                         })
 
     def _select_image_features(self, image_features: torch.Tensor, *,
                                strategy: str) -> torch.Tensor:
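
For context, a minimal usage sketch of the migrated schema follows. It assumes that TensorSchema runs the TensorShape check when the object is constructed and that the symbolic "h"/"w" dimensions are pinned through resolve_bindings, mirroring the call in _parse_and_validate_video_input above; the 336x336 frame size and the dummy tensors are illustrative, not taken from this commit.

# Usage sketch (not part of the commit). Assumes the TensorShape annotation
# is validated when LlavaNextVideoPixelInputs is constructed.
import torch

from vllm.model_executor.models.llava_next_video import (
    LlavaNextVideoPixelInputs)

# One batch with one video of 8 frames, each frame 3 x 336 x 336; the
# 336 stands in for vision_config.image_size.
pixel_values_videos = torch.zeros(1, 1, 8, 3, 336, 336)

inputs = LlavaNextVideoPixelInputs(
    type="pixel_values_videos",
    data=pixel_values_videos,
    resolve_bindings={"h": 336, "w": 336},
)

# A mismatched frame resolution should now be rejected by the schema
# itself, which replaces the removed _validate_video_pixel_values helper.
try:
    LlavaNextVideoPixelInputs(
        type="pixel_values_videos",
        data=torch.zeros(1, 1, 8, 3, 224, 224),
        resolve_bindings={"h": 336, "w": 336},
    )
except Exception as exc:  # exact exception type depends on TensorSchema
    print(f"rejected as expected: {exc}")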
