Commit 8353ffb

Add missing deprecations for classes and constants in the prototypes directory (#3953)
Co-authored-by: Sam Anklesaria <[email protected]>
1 parent bf4e412 commit 8353ffb

17 files changed, +67 -29 lines changed

docs/source/prototype.rst

Lines changed: 4 additions & 0 deletions
@@ -1,6 +1,10 @@
 torchaudio.prototype
 ====================
 
+.. warning::
+   As TorchAudio is no longer being actively developed, this functionality will no longer be supported.
+   See https://github.com/pytorch/audio/issues/3902 for more details.
+
 ``torchaudio.prototype`` provides prototype features;
 they are at an early stage for feedback and testing.
 Their interfaces might be changed without prior notice.

examples/tutorials/audio_io_tutorial.py

Lines changed: 2 additions & 4 deletions
@@ -9,10 +9,8 @@
 load them into PyTorch Tensors and save PyTorch Tensors.
 
 .. warning::
-
-   There are multiple changes planned/made to audio I/O in recent releases.
-   For the detail of these changes please refer to
-   :ref:`Introduction of Dispatcher <dispatcher_migration>`.
+   IO functionality within TorchAudio has been superseded by `AudioDecoder` from the TorchCodec library.
+   See https://github.com/pytorch/audio/issues/3902 for more details.
 
 
 """
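
The new warning points readers to TorchCodec as the replacement for TorchAudio's I/O. A minimal sketch of that migration path, assuming the `torchcodec.decoders.AudioDecoder` interface with a `get_all_samples()` method (not part of this commit; check the TorchCodec documentation for the exact API):

from torchcodec.decoders import AudioDecoder

# "speech.wav" is a hypothetical local file used only for illustration.
decoder = AudioDecoder("speech.wav")
samples = decoder.get_all_samples()   # decode the whole stream at once
waveform = samples.data               # torch.Tensor of decoded audio
sample_rate = samples.sample_rate
print(waveform.shape, sample_rate)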

examples/tutorials/ctc_forced_alignment_api_tutorial.py

Lines changed: 4 additions & 0 deletions
@@ -4,6 +4,10 @@
 
 **Author**: `Xiaohui Zhang <[email protected]>`__, `Moto Hira <[email protected]>`__
 
+.. warning::
+   As TorchAudio is no longer being actively developed, this functionality will no longer be supported.
+   See https://github.com/pytorch/audio/issues/3902 for more details.
+
 The forced alignment is a process to align transcript with speech.
 This tutorial shows how to align transcripts to speech using
 :py:func:`torchaudio.functional.forced_align` which was developed along the work of
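
The tutorial being flagged here centers on :py:func:`torchaudio.functional.forced_align`. A minimal sketch of the call with synthetic inputs; in the real tutorial the emissions come from a wav2vec2 CTC model, and the shapes and token IDs below are invented for illustration:

import torch
import torchaudio.functional as F

# Synthetic CTC emissions: (batch, frames, num_tokens), as log-probabilities.
log_probs = torch.randn(1, 100, 32).log_softmax(dim=-1)
# Token IDs of the transcript to align; the blank index (0) must not appear here.
targets = torch.tensor([[5, 12, 7, 9, 3]], dtype=torch.int32)
input_lengths = torch.tensor([100], dtype=torch.int32)
target_lengths = torch.tensor([5], dtype=torch.int32)

alignments, scores = F.forced_align(log_probs, targets, input_lengths, target_lengths, blank=0)
print(alignments.shape, scores.shape)  # per-frame token indices and their scores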

examples/tutorials/effector_tutorial.py

Lines changed: 4 additions & 0 deletions
@@ -4,6 +4,10 @@
 
 **Author**: `Moto Hira <[email protected]>`__
 
+.. warning::
+   As TorchAudio is no longer being actively developed, this functionality will no longer be supported.
+   See https://github.com/pytorch/audio/issues/3902 for more details.
+
 This tutorial shows how to use :py:class:`torchaudio.io.AudioEffector` to
 apply various effects and codecs to waveform tensor.
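
For context, a minimal sketch of the class this tutorial demonstrates; the effect string and the `(time, channel)` waveform layout are assumptions based on the tutorial's description, so treat the details as illustrative rather than authoritative:

import torch
from torchaudio.io import AudioEffector

waveform = torch.rand(16000, 1)              # 1 second of mono audio at 16 kHz, (time, channel)
effector = AudioEffector(effect="lowpass")   # ffmpeg filter expression
processed = effector.apply(waveform, 16000)  # filtered waveform as a tensor
print(processed.shape)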

src/torchaudio/_internal/module_utils.py

Lines changed: 10 additions & 0 deletions
@@ -123,6 +123,16 @@ def dropping_class_support(c, msg=DEPRECATION_MSG):
     UNSUPPORTED.append(c)
     return c
 
+def dropping_const_support(c, msg=DEPRECATION_MSG, name=None):
+    c.__doc__ = f"""[DEPRECATED] {c.__doc__}
+
+    .. warning::
+
+        This object has been deprecated. It will be removed from the 2.9 release.
+        {msg}
+    """
+    return c
+
 dropping_class_io_support = partial(dropping_class_support, msg=IO_DEPRECATION_MSG)
 
 dropping_io_support = deprecated(IO_DEPRECATION_MSG, version="2.9", remove=True)
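
Unlike `dropping_support`, which wraps a callable, `dropping_const_support` only rewrites the docstring of the object it receives and returns the same object, so it can be attached to re-exported constants. A small illustration of the intended use, mirroring how `Hypothesis` is wrapped later in this commit (the `_Example` class is purely hypothetical):

from torchaudio._internal.module_utils import dropping_const_support

class _Example:
    """A placeholder object used only for illustration."""

# The helper returns the very same object; only __doc__ gains the
# "[DEPRECATED]" prefix and the warning block defined above.
Example = dropping_const_support(_Example, name="Example")
assert Example is _Example
print(Example.__doc__)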

src/torchaudio/prototype/datasets/musan.py

Lines changed: 2 additions & 3 deletions
@@ -4,13 +4,13 @@
 import torch
 from torch.utils.data import Dataset
 from torchaudio.datasets.utils import _load_waveform
-from torchaudio._internal.module_utils import dropping_support
+from torchaudio._internal.module_utils import dropping_support, dropping_class_support
 
 
 _SUBSETS = ["music", "noise", "speech"]
 _SAMPLE_RATE = 16_000
 
-
+@dropping_class_support
 class Musan(Dataset):
     r"""*MUSAN* :cite:`musan2015` dataset.
 
@@ -19,7 +19,6 @@ class Musan(Dataset):
         subset (str): Subset of the dataset to use. Options: [``"music"``, ``"noise"``, ``"speech"``].
     """
 
-    @dropping_support
     def __init__(self, root: Union[str, Path], subset: str):
        if subset not in _SUBSETS:
            raise ValueError(f"Invalid subset '{subset}' given. Please provide one of {_SUBSETS}")

src/torchaudio/prototype/models/__init__.py

Lines changed: 4 additions & 1 deletion
@@ -1,3 +1,4 @@
+from torchaudio._internal.module_utils import dropping_const_support
 from ._conformer_wav2vec2 import (
     conformer_wav2vec2_base,
     conformer_wav2vec2_model,
@@ -10,7 +11,9 @@
 from .conv_emformer import ConvEmformer
 from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
 from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
-from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
+from .rnnt_decoder import Hypothesis as _Hypothesis, RNNTBeamSearchBiasing
+
+Hypothesis = dropping_const_support(_Hypothesis, name="Hypothesis")
 
 __all__ = [
     "conformer_rnnt_base",

src/torchaudio/prototype/models/_conformer_wav2vec2.py

Lines changed: 2 additions & 1 deletion
@@ -7,7 +7,7 @@
 from torchaudio.models.conformer import ConformerLayer
 from torchaudio.models.rnnt import _TimeReduction
 from torchaudio.models.wav2vec2 import components
-from torchaudio._internal.module_utils import dropping_support
+from torchaudio._internal.module_utils import dropping_class_support, dropping_support
 
 
 def _buffered_arange(max) -> Tensor:
@@ -253,6 +253,7 @@ def extract_features(
         return self._get_intermediate_outputs(x, mask=masks, num_layers=num_layers)
 
 
+@dropping_class_support
 class ConformerWav2Vec2PretrainModel(Module):
     """Conformer Wav2Vec2 pre-train model for training from scratch.

src/torchaudio/prototype/models/conv_emformer.py

Lines changed: 2 additions & 1 deletion
@@ -3,7 +3,7 @@
 
 import torch
 from torchaudio.models.emformer import _EmformerAttention, _EmformerImpl, _get_weight_init_gains
-from torchaudio._internal.module_utils import dropping_support
+from torchaudio._internal.module_utils import dropping_class_support, dropping_support
 
 
 
@@ -443,6 +443,7 @@ def infer(
         return output_utterance, output_right_context, output_state, next_m
 
 
+@dropping_class_support
 class ConvEmformer(_EmformerImpl):
     r"""Implements the convolution-augmented streaming transformer architecture introduced in
     *Streaming Transformer Transducer based Speech Recognition Using Non-Causal Convolution*

src/torchaudio/prototype/models/hifi_gan.py

Lines changed: 2 additions & 1 deletion
@@ -28,9 +28,10 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn import Conv1d, ConvTranspose1d
-from torchaudio._internal.module_utils import dropping_support
+from torchaudio._internal.module_utils import dropping_class_support, dropping_support
 
 
+@dropping_class_support
 class HiFiGANVocoder(torch.nn.Module):
     """Generator part of *HiFi GAN* :cite:`NEURIPS2020_c5d73680`.
     Source: https://github.com/jik876/hifi-gan/blob/4769534d45265d52a904b850da5a622601885777/models.py#L75
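
`HiFiGANVocoder` is the last prototype class picked up by the decorator in this commit. For orientation, a heavily hedged usage sketch: the `hifigan_vocoder_v1` factory appears in the `__init__.py` diff above, while the 80-bin mel input shape is an assumption taken from the HiFi-GAN V1 configuration rather than from this diff:

import torch
from torchaudio.prototype.models import hifigan_vocoder_v1

vocoder = hifigan_vocoder_v1().eval()  # the class itself now carries the deprecation decorator
mel = torch.rand(1, 80, 100)           # (batch, mel bins, frames); 80 bins is an assumption
with torch.inference_mode():
    waveform = vocoder(mel)            # synthesized waveform, roughly (batch, 1, time)
print(waveform.shape)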
