
Commit 914f460

Commit message: delete comments and remove useless import

1 parent 0b01118

File tree: 3 files changed (+0, -43 lines)

- src/diffusers/models/downsampling.py
- src/diffusers/models/transformers/easyanimate_transformer_3d.py
- src/diffusers/models/upsampling.py

src/diffusers/models/downsampling.py

Lines changed: 0 additions & 1 deletion

@@ -14,7 +14,6 @@
 
 from typing import Optional, Tuple
 
-import math
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
src/diffusers/models/transformers/easyanimate_transformer_3d.py

Lines changed: 0 additions & 41 deletions

@@ -134,27 +134,11 @@ class EasyAnimateRMSNorm(nn.Module):
     This implementation is specifically designed for use in models similar to T5.
     """
     def __init__(self, hidden_size, eps=1e-6):
-        """
-        Initializes the RMS normalization layer.
-
-        Parameters:
-        - hidden_size: The size of the hidden layer, used to determine the size of the learnable weight parameters.
-        - eps: A small value added to the denominator to avoid division by zero during normalization.
-        """
         super().__init__()
         self.weight = nn.Parameter(torch.ones(hidden_size))
         self.variance_epsilon = eps
 
     def forward(self, hidden_states):
-        """
-        Performs the forward propagation of the RMS normalization layer.
-
-        Parameters:
-        - hidden_states: The input tensor, usually the output of the previous layer.
-
-        Returns:
-        - The normalized tensor, scaled by the learnable weight parameters.
-        """
         # Save the input data type for restoring it before returning
         input_dtype = hidden_states.dtype
         # Convert the input to float32 for accurate calculation
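
The hunk above removes the docstrings but keeps the inline comments that describe the computation. For reference, here is a minimal sketch of the standard T5-style RMS normalization those comments refer to; the part of `forward` below the float32 cast is not visible in this hunk, so everything past that point follows the conventional pattern rather than being copied from the file, and the class name `RMSNormSketch` is purely illustrative.

```python
import torch
import torch.nn as nn


class RMSNormSketch(nn.Module):
    """Minimal T5-style RMS normalization, mirroring the structure kept in the hunk above."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Save the input data type for restoring it before returning
        input_dtype = hidden_states.dtype
        # Convert the input to float32 for accurate calculation
        hidden_states = hidden_states.to(torch.float32)
        # Mean of squares over the last dimension; no mean subtraction, unlike LayerNorm
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        # Scale each vector to unit RMS, with eps guarding against division by zero
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # Apply the learnable per-channel gain and restore the original dtype
        return (self.weight * hidden_states).to(input_dtype)
```

Because no mean is subtracted, only the root-mean-square of each vector is normalized, which is why a single learnable gain vector is the only parameter.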
@@ -175,17 +159,6 @@ class EasyAnimateLayerNormZero(nn.Module):
 
     This module applies a learned affine transformation to the input, which is useful for stabilizing the training of deep neural networks.
     It is designed to work with both standard and fp32 layer normalization, depending on the `norm_type` parameter.
-
-    Parameters:
-    - conditioning_dim: int, the dimension of the input conditioning vector.
-    - embedding_dim: int, the dimension of the hidden state and encoder hidden state embeddings.
-    - elementwise_affine: bool, default True, whether to learn an affine transformation for each element.
-    - eps: float, default 1e-5, a value added to the denominator for numerical stability.
-    - bias: bool, default True, whether to include a bias term in the linear transformation.
-    - norm_type: str, default 'fp32_layer_norm', the type of normalization to apply. Supports 'layer_norm' and 'fp32_layer_norm'.
-
-    Raises:
-    - ValueError: if an unsupported `norm_type` is provided.
     """
     def __init__(
         self,
@@ -215,20 +188,6 @@ def __init__(
     def forward(
         self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor
     ) -> Tuple[torch.Tensor, torch.Tensor]:
-        """
-        Applies the learned affine transformation to the input hidden states and encoder hidden states.
-
-        Parameters:
-        - hidden_states: torch.Tensor, the hidden states tensor.
-        - encoder_hidden_states: torch.Tensor, the encoder hidden states tensor.
-        - temb: torch.Tensor, the conditioning input tensor.
-
-        Returns:
-        - hidden_states: torch.Tensor, the transformed hidden states tensor.
-        - encoder_hidden_states: torch.Tensor, the transformed encoder hidden states tensor.
-        - gate: torch.Tensor, the gate tensor for hidden states.
-        - enc_gate: torch.Tensor, the gate tensor for encoder hidden states.
-        """
         # Apply SiLU activation to temb and then linear transformation, splitting the result into 6 parts
         shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1)
         # Apply normalization and learned affine transformation to hidden states
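
The forward hunk ends at the `chunk(6, dim=1)` call, so the actual normalization and modulation lines are not visible here. The sketch below shows the usual adaLN-Zero-style pattern that the surviving comments describe; the `(1 + scale)` modulation, the two separate norms, and the assumed shapes (`(batch, seq, dim)` hidden states, `(batch, dim)` temb) are assumptions, and the class name `LayerNormZeroSketch` is illustrative rather than the file's implementation.

```python
import torch
import torch.nn as nn


class LayerNormZeroSketch(nn.Module):
    """Minimal sketch of the adaLN-Zero-style modulation used in the forward hunk above (assumed pattern)."""

    def __init__(self, conditioning_dim: int, embedding_dim: int, eps: float = 1e-5, bias: bool = True):
        super().__init__()
        self.silu = nn.SiLU()
        # One projection produces shift/scale/gate for both streams: 6 * embedding_dim outputs
        self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias)
        self.norm = nn.LayerNorm(embedding_dim, eps=eps)
        self.norm_enc = nn.LayerNorm(embedding_dim, eps=eps)

    def forward(self, hidden_states, encoder_hidden_states, temb):
        # Apply SiLU activation to temb and then linear transformation, splitting the result into 6 parts
        shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1)
        # Apply normalization and a learned affine transformation to each stream (assumed formula)
        hidden_states = self.norm(hidden_states) * (1 + scale)[:, None] + shift[:, None]
        encoder_hidden_states = self.norm_enc(encoder_hidden_states) * (1 + enc_scale)[:, None] + enc_shift[:, None]
        # Gates are returned so the caller can scale its residual branches with them
        return hidden_states, encoder_hidden_states, gate[:, None], enc_gate[:, None]
```

Returning the gates alongside the two modulated streams matches the four return values listed in the docstring that this commit removes.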

src/diffusers/models/upsampling.py

Lines changed: 0 additions & 1 deletion

@@ -14,7 +14,6 @@
 
 from typing import Optional, Tuple
 
-import math
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

0 commit comments