We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 4b9f1c7 · commit 98cda21 (Copy full SHA for 98cda21)
src/diffusers/models/transformers/sana_transformer.py
@@ -250,14 +250,14 @@ def __init__(
250
inner_dim = num_attention_heads * attention_head_dim
251
252
# 1. Patch Embedding
253
- interpolation_scale = interpolation_scale if interpolation_scale is not None else max(sample_size // 64, 1)
254
self.patch_embed = PatchEmbed(
255
height=sample_size,
256
width=sample_size,
257
patch_size=patch_size,
258
in_channels=in_channels,
259
embed_dim=inner_dim,
260
interpolation_scale=interpolation_scale,
+ pos_embed_type="sincos" if interpolation_scale is not None else None,
261
)
262
263
# 2. Additional condition embeddings
0 commit comments