@@ -401,8 +401,7 @@ def __init__(
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         r"""
         Args:
-            hidden_states
-                torch.Tensor of *encoder* input embeddings.
+            hidden_states: torch.Tensor of *encoder* input embeddings.
         Returns:
             Encoder layer output torch.Tensor
         """
@@ -490,10 +489,8 @@ def forward(
     ) -> torch.Tensor:
         r"""
         Args:
-            decoder_hidden_states
-                torch.Tensor of *decoder* input embeddings.
-            encoder_hidden_states
-                torch.Tensor of *encoder* input embeddings.
+            decoder_hidden_states: torch.Tensor of *decoder* input embeddings.
+            encoder_hidden_states: torch.Tensor of *encoder* input embeddings.
         Returns:
             Decoder layer output torch.Tensor
         """
@@ -584,12 +581,10 @@ def forward(
     ) -> torch.Tensor:
         r"""
         Args:
-            input_ids
-                Indices of *encoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            positions
-                Positions of *encoder* input sequence tokens.
+            input_ids: Indices of *encoder* input sequence tokens in the
+                vocabulary.
+                Padding will be ignored by default should you provide it.
+            positions: Positions of *encoder* input sequence tokens.
         Returns:
             Decoder output torch.Tensor
         """
@@ -663,14 +658,11 @@ def forward(
     ) -> torch.Tensor:
         r"""
         Args:
-            decoder_input_ids
-                Indices of *decoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            decoder_positions
-                Positions of *decoder* input sequence tokens.
-            encoder_hidden_states:
-                Tensor of encoder output embeddings
+            decoder_input_ids: Indices of *decoder* input sequence tokens
+                in the vocabulary.
+                Padding will be ignored by default should you provide it.
+            decoder_positions: Positions of *decoder* input sequence tokens.
+            encoder_hidden_states: Tensor of encoder output embeddings.
         Returns:
             Decoder output torch.Tensor
         """
@@ -732,16 +724,13 @@ def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
                 encoder_positions: torch.Tensor) -> torch.Tensor:
         r"""
         Args:
-            input_ids
-                Indices of *decoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            positions
-                Positions of *decoder* input sequence tokens.
-            encoder_input_ids
-                Indices of *encoder* input sequence tokens in the vocabulary.
-            encoder_positions:
-                Positions of *encoder* input sequence tokens.
+            input_ids: Indices of *decoder* input sequence tokens
+                in the vocabulary.
+                Padding will be ignored by default should you provide it.
+            positions: Positions of *decoder* input sequence tokens.
+            encoder_input_ids: Indices of *encoder* input sequence tokens
+                in the vocabulary.
+            encoder_positions: Positions of *encoder* input sequence tokens.
         Returns:
             Model output torch.Tensor
         """
@@ -848,14 +837,10 @@ def forward(
     ) -> torch.Tensor:
         r"""
         Args:
-            input_ids
-                torch.Tensor of *decoder* input token ids.
-            positions
-                torch.Tensor of *decoder* position indices.
-            encoder_input_ids
-                torch.Tensor of *encoder* input token ids.
-            encoder_positions
-                torch.Tensor of *encoder* position indices
+            input_ids: torch.Tensor of *decoder* input token ids.
+            positions: torch.Tensor of *decoder* position indices.
+            encoder_input_ids: torch.Tensor of *encoder* input token ids.
+            encoder_positions: torch.Tensor of *encoder* position indices.
         Returns:
             Output torch.Tensor
         """
@@ -912,8 +897,7 @@ class MBartEncoderLayer(BartEncoderLayer):
     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         r"""
         Args:
-            hidden_states
-                torch.Tensor of *encoder* input embeddings.
+            hidden_states: torch.Tensor of *encoder* input embeddings.
         Returns:
             Encoder layer output torch.Tensor
         """
@@ -1035,12 +1019,10 @@ def forward(
     ) -> torch.Tensor:
         r"""
         Args:
-            input_ids
-                Indices of *encoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            positions
-                Positions of *encoder* input sequence tokens.
+            input_ids: Indices of *encoder* input sequence tokens in the
+                vocabulary.
+                Padding will be ignored by default should you provide it.
+            positions: Positions of *encoder* input sequence tokens.
         Returns:
             Decoder output torch.Tensor
         """
@@ -1116,14 +1098,11 @@ def forward(
     ) -> torch.Tensor:
         r"""
        Args:
-            decoder_input_ids
-                Indices of *decoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            decoder_positions
-                Positions of *decoder* input sequence tokens.
-            encoder_hidden_states:
-                Tensor of encoder output embeddings
+            decoder_input_ids: Indices of *decoder* input sequence tokens
+                in the vocabulary.
+                Padding will be ignored by default should you provide it.
+            decoder_positions: Positions of *decoder* input sequence tokens.
+            encoder_hidden_states: Tensor of encoder output embeddings.
         Returns:
             Decoder output torch.Tensor
         """
@@ -1185,16 +1164,13 @@ def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
                 encoder_positions: torch.Tensor) -> torch.Tensor:
         r"""
         Args:
-            input_ids
-                Indices of *decoder* input sequence tokens in the vocabulary.
-                Padding will be ignored by default should you
-                provide it.
-            positions
-                Positions of *decoder* input sequence tokens.
-            encoder_input_ids
-                Indices of *encoder* input sequence tokens in the vocabulary.
-            encoder_positions:
-                Positions of *encoder* input sequence tokens.
+            input_ids: Indices of *decoder* input sequence tokens
+                in the vocabulary.
+                Padding will be ignored by default should you provide it.
+            positions: Positions of *decoder* input sequence tokens.
+            encoder_input_ids: Indices of *encoder* input sequence tokens
+                in the vocabulary.
+            encoder_positions: Positions of *encoder* input sequence tokens.
         Returns:
             Model output torch.Tensor
         """