@@ -163,7 +163,7 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
     _keys_to_ignore_on_load_unexpected = None
     _no_split_modules = None
     _keep_in_fp32_modules = None
-    _always_upcast_modules = None
+    _precision_sensitive_module_patterns = None
 
     def __init__(self):
         super().__init__()
@@ -344,10 +344,11 @@ def enable_layerwise_upcasting(
         memory footprint from model weights, but may lead to some quality degradation in the outputs. Most degradations
         are negligible, mostly stemming from weight casting in normalization and modulation layers.
 
-        By default, most models in diffusers set the `_always_upcast_modules` attribute to ignore patch embedding,
-        positional embedding and normalization layers. This is because these layers are most likely precision-critical
-        for quality. If you wish to change this behavior, you can set the `_always_upcast_modules` attribute to `None`,
-        or call [`~hooks.layerwise_upcasting.apply_layerwise_upcasting`] with custom arguments.
+        By default, most models in diffusers set the `_precision_sensitive_module_patterns` attribute to ignore patch
+        embedding, positional embedding and normalization layers. This is because these layers are most likely
+        precision-critical for quality. If you wish to change this behavior, you can set the
+        `_precision_sensitive_module_patterns` attribute to `None`, or call
+        [`~hooks.layerwise_upcasting.apply_layerwise_upcasting`] with custom arguments.
 
         Example:
             Using [`~models.ModelMixin.enable_layerwise_upcasting`]:
@@ -386,8 +387,8 @@ def enable_layerwise_upcasting(
             user_provided_patterns = False
         if self._keep_in_fp32_modules is not None:
            skip_modules_pattern += tuple(self._keep_in_fp32_modules)
-        if self._always_upcast_modules is not None:
-            skip_modules_pattern += tuple(self._always_upcast_modules)
+        if self._precision_sensitive_module_patterns is not None:
+            skip_modules_pattern += tuple(self._precision_sensitive_module_patterns)
         skip_modules_pattern = tuple(set(skip_modules_pattern))
 
         if is_peft_available() and not user_provided_patterns:
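For reference, a minimal usage sketch of the renamed attribute together with the method touched in this diff. The checkpoint, the `storage_dtype`/`compute_dtype` parameter names, and the presence of `_precision_sensitive_module_patterns` on `CogVideoXTransformer3DModel` are assumptions drawn from the docstring above, not guaranteed by this change alone:

```python
import torch

from diffusers import CogVideoXTransformer3DModel

# Illustrative checkpoint; any ModelMixin subclass that supports layerwise
# upcasting should work the same way.
transformer = CogVideoXTransformer3DModel.from_pretrained(
    "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Optional: opt out of the default protection of precision-sensitive layers
# (patch/positional embeddings, norms) by clearing the class-level patterns
# before enabling upcasting. This may degrade output quality.
# CogVideoXTransformer3DModel._precision_sensitive_module_patterns = None

# Store weights in float8 and upcast them to the compute dtype just-in-time
# per layer during the forward pass.
transformer.enable_layerwise_upcasting(
    storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16
)
```

As the second hunk shows, any patterns in `_precision_sensitive_module_patterns` are merged with `_keep_in_fp32_modules` into a single `skip_modules_pattern` tuple, so both attributes exempt their matching modules from downcasting.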