Skip to content

Commit cfe6318

Browse files
committed
norm.* -> norm
1 parent e586ef3 commit cfe6318

23 files changed

+23
-23
lines changed

src/diffusers/models/modeling_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,7 @@ def enable_layerwise_upcasting(
353353
354354
>>> # Or, enable layerwise upcasting with custom arguments via the `apply_layerwise_upcasting` function
355355
>>> apply_layerwise_upcasting(
356-
... transformer, torch.float8_e4m3fn, torch.bfloat16, skip_modules_pattern=["patch_embed", "norm.*"]
356+
... transformer, torch.float8_e4m3fn, torch.bfloat16, skip_modules_pattern=["patch_embed", "norm"]
357357
... )
358358
```
359359

src/diffusers/models/transformers/auraflow_transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -276,7 +276,7 @@ class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin, FromOriginalModelMixin
276276
"""
277277

278278
_no_split_modules = ["AuraFlowJointTransformerBlock", "AuraFlowSingleTransformerBlock", "AuraFlowPatchEmbed"]
279-
_always_upcast_modules = ["pos_embed", "norm.*"]
279+
_always_upcast_modules = ["pos_embed", "norm"]
280280
_supports_gradient_checkpointing = True
281281

282282
@register_to_config

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
212212
Scaling factor to apply in 3D positional embeddings across temporal dimensions.
213213
"""
214214

215-
_always_upcast_modules = ["patch_embed", "norm.*"]
215+
_always_upcast_modules = ["patch_embed", "norm"]
216216
_supports_gradient_checkpointing = True
217217
_no_split_modules = ["CogVideoXBlock", "CogVideoXPatchEmbed"]
218218

src/diffusers/models/transformers/dit_transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ class DiTTransformer2DModel(ModelMixin, ConfigMixin):
6464
A small constant added to the denominator in normalization layers to prevent division by zero.
6565
"""
6666

67-
_always_upcast_modules = ["pos_embed", "norm.*"]
67+
_always_upcast_modules = ["pos_embed", "norm"]
6868
_supports_gradient_checkpointing = True
6969

7070
@register_to_config

src/diffusers/models/transformers/hunyuan_transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ class HunyuanDiT2DModel(ModelMixin, ConfigMixin):
244244
Whether or not to use style condition and image meta size. True for version <=1.1, False for version >= 1.2
245245
"""
246246

247-
_always_upcast_modules = ["pos_embed", "norm.*", "pooler"]
247+
_always_upcast_modules = ["pos_embed", "norm", "pooler"]
248248

249249
@register_to_config
250250
def __init__(

src/diffusers/models/transformers/latte_transformer_3d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ class LatteTransformer3DModel(ModelMixin, ConfigMixin):
6565
The number of frames in the video-like data.
6666
"""
6767

68-
_always_upcast_modules = ["pos_embed", "norm.*"]
68+
_always_upcast_modules = ["pos_embed", "norm"]
6969

7070
@register_to_config
7171
def __init__(

src/diffusers/models/transformers/lumina_nextdit2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -221,7 +221,7 @@ class LuminaNextDiT2DModel(ModelMixin, ConfigMixin):
221221
overall scale of the model's operations.
222222
"""
223223

224-
_always_upcast_modules = ["patch_embedder", "norm.*", "ffn_norm.*"]
224+
_always_upcast_modules = ["patch_embedder", "norm", "ffn_norm"]
225225

226226
@register_to_config
227227
def __init__(

src/diffusers/models/transformers/pixart_transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ class PixArtTransformer2DModel(ModelMixin, ConfigMixin):
7979

8080
_supports_gradient_checkpointing = True
8181
_no_split_modules = ["BasicTransformerBlock", "PatchEmbed"]
82-
_always_upcast_modules = ["pos_embed", "norm.*", "adaln_single"]
82+
_always_upcast_modules = ["pos_embed", "norm", "adaln_single"]
8383

8484
@register_to_config
8585
def __init__(

src/diffusers/models/transformers/sana_transformer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ class SanaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
222222

223223
_supports_gradient_checkpointing = True
224224
_no_split_modules = ["SanaTransformerBlock", "PatchEmbed"]
225-
_always_upcast_modules = ["patch_embed", "norm.*"]
225+
_always_upcast_modules = ["patch_embed", "norm"]
226226

227227
@register_to_config
228228
def __init__(

src/diffusers/models/transformers/transformer_2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin):
6666

6767
_supports_gradient_checkpointing = True
6868
_no_split_modules = ["BasicTransformerBlock"]
69-
_always_upcast_modules = ["latent_image_embedding", "norm.*"]
69+
_always_upcast_modules = ["latent_image_embedding", "norm"]
7070

7171
@register_to_config
7272
def __init__(

0 commit comments

Comments (0)