Decoder-native resize public implementation #1003
Changes from all commits: dd24dfa, 3a2df84, 5344ab4, 98cf81b, 65c4ad7, f300c70, 2c3b7f0, 80e84b5, 5ac60d8, 531b40f, cc333ac, 238a8ff, 55d362c, 0d2492e, a2da767, 2cd3f65, 4ff0186, 0f9eb62, 8081298, 39ed9ac, 6e6815c, 363e688, 463674d, c20914c, 254641a, 9b4186a, 105c77f, 70b5976
New documentation file (@@ -0,0 +1,17 @@), the API reference page for `torchcodec.transforms`:

```rst
.. _transforms:

=====================
torchcodec.transforms
=====================

.. currentmodule:: torchcodec.transforms

For a tutorial, see: TODO_DECODER_TRANSFORMS_TUTORIAL.

.. autosummary::
    :toctree: generated/
    :nosignatures:
    :template: dataclass.rst

    DecoderTransform
    Resize
```
Modified documentation index (@@ -125,3 +125,4 @@), adding the new page to the existing list of API reference pages:

```rst
   api_ref_decoders
   api_ref_encoders
   api_ref_samplers
   api_ref_transforms
```
New file (@@ -0,0 +1,7 @@), re-exporting the public names from the private module:

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from ._decoder_transforms import DecoderTransform, Resize  # noqa
```
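Because the RST page above documents these names under ``torchcodec.transforms``, the re-export lets users import them from the package rather than from the private module. A minimal sketch, assuming a torchcodec build that includes this PR:

```python
# Import the public decoder transforms; the private module
# torchcodec.transforms._decoder_transforms stays an implementation detail.
from torchcodec.transforms import DecoderTransform, Resize

print(issubclass(Resize, DecoderTransform))  # True
```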
New file (@@ -0,0 +1,93 @@) with the decoder transform implementations:

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from abc import ABC, abstractmethod
from dataclasses import dataclass
from types import ModuleType
from typing import Sequence

from torch import nn


@dataclass
class DecoderTransform(ABC):
    """Base class for all decoder transforms.

    A *decoder transform* is a transform that is applied by the decoder before
    returning the decoded frame. Applying decoder transforms to frames should
    be both faster and more memory efficient than receiving normally decoded
    frames and applying the same kind of transform.

    Most ``DecoderTransform`` objects have a complementary transform in
    TorchVision, specifically in
    `torchvision.transforms.v2 <https://docs.pytorch.org/vision/stable/transforms.html>`_.
    For such transforms, we ensure that:

    1. The names are the same.
    2. Default behaviors are the same.
    3. The parameters for the ``DecoderTransform`` object are a subset of the
       TorchVision :class:`~torchvision.transforms.v2.Transform` object.
    4. Parameters with the same name control the same behavior and accept a
       subset of the same types.
    5. The difference between the frames returned by a decoder transform and
       the complementary TorchVision transform is such that a model should
       not be able to tell the difference.
    """

    @abstractmethod
    def _make_transform_spec(self) -> str:
        pass


def import_torchvision_transforms_v2() -> ModuleType:
    try:
        from torchvision.transforms import v2
    except ImportError as e:
        raise RuntimeError(
            "Cannot import TorchVision; this should never happen, please report a bug."
        ) from e
    return v2


@dataclass
class Resize(DecoderTransform):
    """Resize the decoded frame to a given size.

    Complementary TorchVision transform: :class:`~torchvision.transforms.v2.Resize`.
    Interpolation is always bilinear. Anti-aliasing is always on.

    Args:
        size (sequence of int): Desired output size. Must be a sequence of
            the form (height, width).
    """

    size: Sequence[int]

    def _make_transform_spec(self) -> str:
        assert len(self.size) == 2
        return f"resize, {self.size[0]}, {self.size[1]}"
```
**Comment from the PR author:** Note this class method below is new. Because I'm trying to exhaustively catch all of the […] Also, to state it explicitly, […]
```python
    # (continuation of class Resize)
    @classmethod
    def _from_torchvision(cls, resize_tv: nn.Module):
        v2 = import_torchvision_transforms_v2()

        assert isinstance(resize_tv, v2.Resize)

        if resize_tv.interpolation is not v2.InterpolationMode.BILINEAR:
            raise ValueError(
                "TorchVision Resize transform must use bilinear interpolation."
            )
        if resize_tv.antialias is False:
            raise ValueError(
                "TorchVision Resize transform must have antialias enabled."
            )
        if resize_tv.size is None:
            raise ValueError("TorchVision Resize transform must have a size specified.")
        if len(resize_tv.size) != 2:
            raise ValueError(
                "TorchVision Resize transform must have a (height, width) "
                f"pair for the size, got {resize_tv.size}."
            )
        return cls(size=resize_tv.size)
```
**Review comment:** I was getting linting errors like https://github.com/meta-pytorch/torchcodec/actions/runs/19157614790/job/54761644331, which point to docs that recommend the above change: https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
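To make the `_from_torchvision` bridge concrete, here is a hedged sketch of how it behaves, assuming TorchVision is installed; the diff only guarantees the checks shown above (bilinear interpolation, anti-aliasing on, and a (height, width) size):

```python
from torchvision.transforms import v2

from torchcodec.transforms import Resize

# In recent TorchVision releases, v2.Resize defaults to bilinear
# interpolation with antialias=True, so it converts cleanly.
decoder_resize = Resize._from_torchvision(v2.Resize(size=(270, 480)))
print(decoder_resize.size)  # (270, 480), possibly normalized to a list by TorchVision

# Unsupported configurations are rejected rather than silently changed:
try:
    Resize._from_torchvision(
        v2.Resize(size=(270, 480), interpolation=v2.InterpolationMode.NEAREST)
    )
except ValueError as e:
    print(e)  # "TorchVision Resize transform must use bilinear interpolation."
```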