-
Notifications
You must be signed in to change notification settings - Fork 71
Decoder-native resize public implementation #1003
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 13 commits
dd24dfa
3a2df84
5344ab4
98cf81b
65c4ad7
f300c70
2c3b7f0
80e84b5
5ac60d8
531b40f
cc333ac
238a8ff
55d362c
0d2492e
a2da767
2cd3f65
4ff0186
0f9eb62
8081298
39ed9ac
6e6815c
363e688
463674d
c20914c
254641a
9b4186a
105c77f
70b5976
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -8,7 +8,7 @@ | |||||
| import json | ||||||
| import numbers | ||||||
| from pathlib import Path | ||||||
| from typing import Literal, Optional, Tuple, Union | ||||||
| from typing import Literal, Optional, Sequence, Tuple, Union | ||||||
|
|
||||||
| import torch | ||||||
| from torch import device as torch_device, Tensor | ||||||
|
|
@@ -19,6 +19,7 @@ | |||||
| create_decoder, | ||||||
| ERROR_REPORTING_INSTRUCTIONS, | ||||||
| ) | ||||||
| from torchcodec.transforms import DecoderNativeTransform, Resize | ||||||
|
|
||||||
|
|
||||||
| class VideoDecoder: | ||||||
|
|
@@ -103,6 +104,7 @@ def __init__( | |||||
| dimension_order: Literal["NCHW", "NHWC"] = "NCHW", | ||||||
| num_ffmpeg_threads: int = 1, | ||||||
| device: Optional[Union[str, torch_device]] = "cpu", | ||||||
| transforms: Optional[Sequence[DecoderNativeTransform]] = None, | ||||||
| seek_mode: Literal["exact", "approximate"] = "exact", | ||||||
| custom_frame_mappings: Optional[ | ||||||
| Union[str, bytes, io.RawIOBase, io.BufferedReader] | ||||||
|
|
@@ -148,13 +150,16 @@ def __init__( | |||||
|
|
||||||
| device_variant = _get_cuda_backend() | ||||||
|
|
||||||
| transform_specs = _make_transform_specs(transforms) | ||||||
|
|
||||||
| core.add_video_stream( | ||||||
| self._decoder, | ||||||
| stream_index=stream_index, | ||||||
| dimension_order=dimension_order, | ||||||
| num_threads=num_ffmpeg_threads, | ||||||
| device=device, | ||||||
| device_variant=device_variant, | ||||||
| transform_specs=transform_specs, | ||||||
| custom_frame_mappings=custom_frame_mappings_data, | ||||||
| ) | ||||||
|
|
||||||
|
|
@@ -432,6 +437,60 @@ def _get_and_validate_stream_metadata( | |||||
| ) | ||||||
|
|
||||||
|
|
||||||
| # This function, _make_transform_specs, and the transforms argument to | ||||||
| # VideoDecoder actually accept a union of DecoderNativeTransform and | ||||||
| # TorchVision transforms. We don't put that in our type annotation because | ||||||
| # that would require importing torchvision at module scope which would mean we | ||||||
| # have a hard dependency on torchvision. | ||||||
| # TODO: better explanation of the above. | ||||||
| def _convert_to_decoder_native_transforms( | ||||||
| transforms: Sequence[DecoderNativeTransform], | ||||||
| ) -> Sequence[DecoderNativeTransform]: | ||||||
| try: | ||||||
| from torchvision.transforms import v2 | ||||||
|
|
||||||
| tv_available = True | ||||||
| except ImportError: | ||||||
| tv_available = False | ||||||
|
|
||||||
| converted_transforms = [] | ||||||
| for transform in transforms: | ||||||
| if not isinstance(transform, DecoderNativeTransform): | ||||||
| if not tv_available: | ||||||
| raise ValueError( | ||||||
| f"The supplied transform, {transform}, is not a TorchCodec " | ||||||
| " DecoderNativeTransform. TorchCodec also accept TorchVision " | ||||||
| "v2 transforms, but TorchVision is not installed." | ||||||
| ) | ||||||
| if isinstance(transform, v2.Resize): | ||||||
|
||||||
| if isinstance(transform, v2.Resize): | |
| elif isinstance(transform, v2.Resize): |
scotts marked this conversation as resolved.
Show resolved
Hide resolved
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,7 @@ | ||
| # Copyright (c) Meta Platforms, Inc. and affiliates. | ||
| # All rights reserved. | ||
| # | ||
| # This source code is licensed under the BSD-style license found in the | ||
| # LICENSE file in the root directory of this source tree. | ||
|
|
||
| from ._decoder_native_transforms import DecoderNativeTransform, Resize # noqa |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,39 @@ | ||
| # Copyright (c) Meta Platforms, Inc. and affiliates. | ||
| # All rights reserved. | ||
| # | ||
| # This source code is licensed under the BSD-style license found in the | ||
| # LICENSE file in the root directory of this source tree. | ||
|
|
||
| from abc import ABC, abstractmethod | ||
| from dataclasses import dataclass | ||
| from typing import Sequence | ||
|
|
||
|
|
||
@dataclass
class DecoderNativeTransform(ABC):
    """Base class for transforms applied natively by the decoder.

    Decoder-native transforms are described declaratively (as dataclass
    fields on subclasses) and serialized into a core-level spec string via
    :meth:`make_params`, which the decoder consumes when the stream is added.
    """

    @abstractmethod
    def make_params(self) -> str:
        """Return the core-level spec string for this transform."""


@dataclass
class Resize(DecoderNativeTransform):
    """Decoder-native resize of decoded frames to ``size``.

    Having a definition parallel to TorchVision's ``Resize`` gives us a place
    to document which behavior is and is not supported: there are no
    ``interpolation`` or ``antialias`` fields yet, because decoder-native
    transforms do not let users control those.
    """

    # (height, width). This type is more restrictive than what TorchVision
    # accepts, but it accurately reflects current decoder-native transform
    # limitations; we reflect that in the type annotation, not just the docs.
    size: Sequence[int]

    def make_params(self) -> str:
        """Serialize this transform as the spec string ``"resize, <h>, <w>"``.

        Raises:
            ValueError: if ``size`` does not contain exactly two elements.
        """
        # Raise instead of `assert` so the validation survives `python -O`,
        # and so callers get an actionable message for a bad `size`.
        if len(self.size) != 2:
            raise ValueError(
                f"Resize.size must have exactly 2 elements, got: {self.size}"
            )
        return f"resize, {self.size[0]}, {self.size[1]}"
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I was getting linting errors like: https://github.com/meta-pytorch/torchcodec/actions/runs/19157614790/job/54761644331
Which points to docs which recommend the above change: https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports