
Commit c5c7588

update
1 parent 6ccaed7 commit c5c7588

3 files changed: +2 additions, -38 deletions


src/diffusers/models/normalization.py

Lines changed: 0 additions & 29 deletions
@@ -243,35 +243,6 @@ def forward(
         return x, gate_msa


-class AdaLayerNormZeroSinglePruned(nn.Module):
-    r"""
-    Norm layer adaptive layer norm zero (adaLN-Zero).
-
-    Parameters:
-        embedding_dim (`int`): The size of each embedding vector.
-        num_embeddings (`int`): The size of the embeddings dictionary.
-    """
-
-    def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
-        super().__init__()
-
-        if norm_type == "layer_norm":
-            self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
-        else:
-            raise ValueError(
-                f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
-            )
-
-    def forward(
-        self,
-        x: torch.Tensor,
-        emb: Optional[torch.Tensor] = None,
-    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        shift_msa, scale_msa, gate_msa = emb.squeeze(0).chunk(3, dim=0)
-        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
-        return x, gate_msa
-
-
 class LuminaRMSNormZero(nn.Module):
     """
     Norm layer adaptive RMS normalization zero.
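
For reference, the deleted class applied the usual adaLN-Zero pattern: layer-normalize the hidden states without affine parameters, modulate them with an externally supplied shift and scale, and return a gate for the attention output. The toy sketch below reproduces that computation; the shapes and variable names are illustrative only, not diffusers API.

import torch
import torch.nn as nn

dim, seq_len = 8, 4
norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)

x = torch.randn(1, seq_len, dim)   # token hidden states
emb = torch.randn(1, 3, dim)       # externally computed shift / scale / gate block

# split the conditioning into shift, scale and gate, as in the deleted forward()
shift_msa, scale_msa, gate_msa = emb.squeeze(0).chunk(3, dim=0)
x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # modulate the normalized states
# gate_msa is returned alongside x and scales the attention output downstream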

src/diffusers/models/transformers/transformer_chroma.py

Lines changed: 2 additions & 4 deletions
@@ -89,9 +89,7 @@ def __init__(
             persistent=False,
         )

-    def forward(
-        self, timestep: torch.Tensor, guidance: Optional[torch.Tensor], pooled_projections: torch.Tensor
-    ) -> torch.Tensor:
+    def forward(self, timestep: torch.Tensor) -> torch.Tensor:
         mod_index_length = self.mod_proj.shape[0]

         timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype)
@@ -642,7 +640,7 @@ def forward(
         if guidance is not None:
             guidance = guidance.to(hidden_states.dtype) * 1000

-        input_vec = self.time_text_embed(timestep, guidance, pooled_projections)
+        input_vec = self.time_text_embed(timestep)
         pooled_temb = self.distilled_guidance_layer(input_vec)

         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
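
With this change the embedder consumes only the timestep; guidance and pooled projections no longer flow through it, and its output still feeds distilled_guidance_layer as before. Below is a minimal, self-contained sketch of a timestep-only embedding path of this kind; the class, names, shapes, and the concatenation layout are illustrative assumptions, not the actual Chroma module.

import torch
import torch.nn as nn

class ToyTimestepEmbed(nn.Module):
    def __init__(self, proj_dim=32, mod_index_length=4):
        super().__init__()
        # fixed per-block modulation index, analogous to the mod_proj buffer in the diff
        self.register_buffer("mod_proj", torch.randn(mod_index_length, proj_dim), persistent=False)
        self.time_proj = nn.Linear(1, proj_dim)

    def forward(self, timestep: torch.Tensor) -> torch.Tensor:
        mod_index_length = self.mod_proj.shape[0]
        timesteps_proj = self.time_proj(timestep[:, None]).to(dtype=timestep.dtype)
        # pair every modulation index with the same timestep features
        timesteps_proj = timesteps_proj[:, None, :].expand(-1, mod_index_length, -1)
        mod_proj = self.mod_proj[None].expand(timestep.shape[0], -1, -1)
        return torch.cat([timesteps_proj, mod_proj], dim=-1)

embed = ToyTimestepEmbed()
input_vec = embed(torch.tensor([500.0]))   # shape: (1, mod_index_length, 2 * proj_dim)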

src/diffusers/pipelines/flux/pipeline_flux.py

Lines changed: 0 additions & 5 deletions
@@ -265,11 +265,6 @@ def _get_t5_prompt_embeds(
         prompt_embeds = self.text_encoder_2(
             text_input_ids.to(device),
             output_hidden_states=False,
-            attention_mask=(
-                self._get_chroma_attn_mask(text_inputs.length, max_sequence_length).to(device)
-                if self.variant == "chroma"
-                else None
-            ),
         )[0]

         dtype = self.text_encoder_2.dtype
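
After this removal, the T5 encoder is invoked with input IDs only, as in the plain Flux path. A standalone sketch of that call using the public transformers API follows; the checkpoint name, prompt, and max_length are illustrative assumptions.

import torch
from transformers import T5EncoderModel, T5TokenizerFast

tokenizer_2 = T5TokenizerFast.from_pretrained("google/t5-v1_1-small")
text_encoder_2 = T5EncoderModel.from_pretrained("google/t5-v1_1-small")

text_inputs = tokenizer_2(
    "a photo of a cat",
    padding="max_length",
    max_length=512,
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    # no attention_mask is passed, matching the simplified pipeline call above
    prompt_embeds = text_encoder_2(text_inputs.input_ids, output_hidden_states=False)[0]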
