diff --git a/src/diffusers/models/transformers/transformer_hunyuan_video.py b/src/diffusers/models/transformers/transformer_hunyuan_video.py
index e3f24d97f3fa..6cb97af93652 100644
--- a/src/diffusers/models/transformers/transformer_hunyuan_video.py
+++ b/src/diffusers/models/transformers/transformer_hunyuan_video.py
@@ -721,6 +721,7 @@ def forward(
 
         for i in range(batch_size):
             attention_mask[i, : effective_sequence_length[i], : effective_sequence_length[i]] = True
+        attention_mask = attention_mask.unsqueeze(1)  # [B, 1, N, N], for broadcasting across attention heads
 
         # 4. Transformer blocks
         if torch.is_grad_enabled() and self.gradient_checkpointing:
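The added line gives the boolean mask a singleton head dimension so it broadcasts against per-head attention scores of shape [B, H, N, N]; a plain [B, N, N] mask would not broadcast there (its batch dimension would be matched against H and raise a size-mismatch error). A minimal sketch of the effect, with made-up sizes and a hypothetical `effective_sequence_length` (not taken from the model):

```python
import torch

# Hypothetical sizes, for illustration only.
batch_size, num_heads, seq_len = 2, 8, 16

# Build the [B, N, N] mask the same way the patched forward() does,
# with assumed per-sample effective lengths.
effective_sequence_length = torch.tensor([16, 12])
attention_mask = torch.zeros(batch_size, seq_len, seq_len, dtype=torch.bool)
for i in range(batch_size):
    attention_mask[i, : effective_sequence_length[i], : effective_sequence_length[i]] = True

attention_mask = attention_mask.unsqueeze(1)  # [B, 1, N, N]

# The singleton dim broadcasts across the head dimension of the scores.
scores = torch.randn(batch_size, num_heads, seq_len, seq_len)  # [B, H, N, N]
masked_scores = scores.masked_fill(~attention_mask, float("-inf"))
print(masked_scores.shape)  # torch.Size([2, 8, 16, 16])
```

The same broadcastable shape is what `torch.nn.functional.scaled_dot_product_attention` accepts for a boolean `attn_mask` (True marks positions that may attend), so downstream attention code can consume the mask without expanding it per head.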