Skip to content

Commit 6ce0778

Browse files
committed
apply suggestions from review
1 parent 21a6f79 commit 6ce0778

File tree

2 files changed

+9
-0
lines changed

2 files changed

+9
-0
lines changed

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,13 @@ def __init__(
240240
super().__init__()
241241
inner_dim = num_attention_heads * attention_head_dim
242242

243+
if not use_rotary_positional_embeddings and use_learned_positional_embeddings:
244+
raise ValueError(
245+
"There are no CogVideoX checkpoints available with rotary embeddings disabled and learned positional "
246+
"embeddings. If you're using a custom model and/or believe this should be supported, please open an "
247+
"issue at https://github.com/huggingface/diffusers/issues."
248+
)
249+
243250
# 1. Patch embedding
244251
self.patch_embed = CogVideoXPatchEmbed(
245252
patch_size=patch_size,

tests/pipelines/cogvideo/test_cogvideox_image2video.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,8 @@ def get_dummy_components(self):
8080
patch_size=2,
8181
temporal_compression_ratio=4,
8282
max_text_seq_length=16,
83+
use_rotary_positional_embeddings=True,
84+
use_learned_positional_embeddings=True,
8385
)
8486

8587
torch.manual_seed(0)

0 commit comments

Comments (0)