@@ -163,7 +163,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
     _keys_to_ignore_on_load_unexpected = None
     _no_split_modules = None
     _keep_in_fp32_modules = None
-    _precision_sensitive_module_patterns = None
+    _skip_layerwise_casting_patterns = None
 
     def __init__(self):
         super().__init__()
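
For context, a minimal sketch of how a model class might use the renamed attribute; the class name and the pattern strings are illustrative, not taken from any specific diffusers model.

```python
# Hypothetical subclass: modules whose names match these patterns keep their
# original dtype when layerwise casting is applied, mirroring how diffusers
# models are described as using this attribute.
import torch
from diffusers import ModelMixin


class MyTransformerModel(ModelMixin):
    # Renamed attribute from this commit; "patch_embed"/"norm" are example patterns.
    _skip_layerwise_casting_patterns = ["patch_embed", "norm"]

    def __init__(self):
        super().__init__()
        self.patch_embed = torch.nn.Linear(16, 64)  # matched -> skipped
        self.norm = torch.nn.LayerNorm(64)          # matched -> skipped
        self.proj_out = torch.nn.Linear(64, 16)     # not matched -> cast
```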
@@ -344,10 +344,10 @@ def enable_layerwise_upcasting(
         memory footprint from model weights, but may lead to some quality degradation in the outputs. Most degradations
         are negligible, mostly stemming from weight casting in normalization and modulation layers.
 
-        By default, most models in diffusers set the `_precision_sensitive_module_patterns` attribute to ignore patch
+        By default, most models in diffusers set the `_skip_layerwise_casting_patterns` attribute to ignore patch
         embedding, positional embedding and normalization layers. This is because these layers are most likely
         precision-critical for quality. If you wish to change this behavior, you can set the
-        `_precision_sensitive_module_patterns` attribute to `None`, or call
+        `_skip_layerwise_casting_patterns` attribute to `None`, or call
         [`~hooks.layerwise_upcasting.apply_layerwise_upcasting`] with custom arguments.
 
         Example:
@@ -387,8 +387,8 @@ def enable_layerwise_upcasting(
             user_provided_patterns = False
         if self._keep_in_fp32_modules is not None:
             skip_modules_pattern += tuple(self._keep_in_fp32_modules)
-        if self._precision_sensitive_module_patterns is not None:
-            skip_modules_pattern += tuple(self._precision_sensitive_module_patterns)
+        if self._skip_layerwise_casting_patterns is not None:
+            skip_modules_pattern += tuple(self._skip_layerwise_casting_patterns)
         skip_modules_pattern = tuple(set(skip_modules_pattern))
 
         if is_peft_available() and not user_provided_patterns:
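
A rough usage sketch of the merging logic in this hunk, assuming the entry point keeps the `enable_layerwise_upcasting` name with `storage_dtype`/`compute_dtype` keyword arguments (later diffusers releases expose this functionality as `enable_layerwise_casting`); the tiny model below is hypothetical.

```python
# Usage sketch (assumed method name and keyword arguments; verify against your
# diffusers version). Patterns from `_skip_layerwise_casting_patterns` and
# `_keep_in_fp32_modules` are unioned into `skip_modules_pattern`, so matching
# modules keep their dtype while everything else is stored in float8.
import torch
from diffusers import ModelMixin


class TinyModel(ModelMixin):
    _keep_in_fp32_modules = ["norm"]
    _skip_layerwise_casting_patterns = ["embed"]

    def __init__(self):
        super().__init__()
        self.embed = torch.nn.Linear(8, 32)  # skipped via _skip_layerwise_casting_patterns
        self.norm = torch.nn.LayerNorm(32)   # skipped via _keep_in_fp32_modules
        self.proj = torch.nn.Linear(32, 8)   # downcast to the storage dtype


model = TinyModel()
model.enable_layerwise_upcasting(
    storage_dtype=torch.float8_e4m3fn,  # precision used to store the weights
    compute_dtype=torch.bfloat16,       # precision the weights are upcast to for forward passes
)
print(model.proj.weight.dtype)   # expected: torch.float8_e4m3fn
print(model.embed.weight.dtype)  # expected: unchanged (float32)
```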