We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 510e1d6 commit 3d3aae3 — Copy full SHA for 3d3aae3
src/diffusers/models/transformers/sana_transformer.py
@@ -257,14 +257,14 @@ def __init__(
257
inner_dim = num_attention_heads * attention_head_dim
258
259
# 1. Patch Embedding
260
- interpolation_scale = interpolation_scale if interpolation_scale is not None else max(sample_size // 64, 1)
261
self.patch_embed = PatchEmbed(
262
height=sample_size,
263
width=sample_size,
264
patch_size=patch_size,
265
in_channels=in_channels,
266
embed_dim=inner_dim,
267
interpolation_scale=interpolation_scale,
+ pos_embed_type="sincos" if interpolation_scale is not None else None,
268
)
269
270
# 2. Additional condition embeddings
0 commit comments