1 parent be55fa6 commit 6b9fd09
src/diffusers/models/modeling_utils.py
@@ -263,7 +263,7 @@ def disable_xformers_memory_efficient_attention(self) -> None:
        """
        self.set_use_memory_efficient_attention_xformers(False)

-    def enable_dynamic_upcasting(self, upcast_dtype=None):
+    def enable_layerwise_upcasting(self, upcast_dtype=None):
        upcast_dtype = upcast_dtype or torch.float32
        downcast_dtype = self.dtype
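The hunk only shows the signature and the first two lines of the renamed method. As a rough, hypothetical sketch of the general layerwise-upcasting idea (not the actual diffusers implementation), the helper below, apply_layerwise_upcasting, uses PyTorch forward hooks to upcast each parameterized leaf module to upcast_dtype right before its forward pass and cast it back to the low-precision storage dtype afterwards; the helper name and the hook logic are assumptions for illustration only.

import torch
import torch.nn as nn


def apply_layerwise_upcasting(model: nn.Module, upcast_dtype=None, downcast_dtype=None):
    """Hypothetical helper: keep weights stored in a low-precision dtype and
    upcast one leaf module at a time to `upcast_dtype` for its forward pass."""
    upcast_dtype = upcast_dtype or torch.float32
    downcast_dtype = downcast_dtype or next(model.parameters()).dtype

    def upcast_hook(module, args):
        # Upcast this layer's weights just before compute, and cast floating
        # point inputs so they match the upcast weights.
        module.to(upcast_dtype)
        return tuple(
            a.to(upcast_dtype) if torch.is_tensor(a) and a.is_floating_point() else a
            for a in args
        )

    def downcast_hook(module, args, output):
        # Return this layer's weights to the low-precision storage dtype.
        module.to(downcast_dtype)

    for module in model.modules():
        is_leaf = len(list(module.children())) == 0
        has_params = any(True for _ in module.parameters(recurse=False))
        if is_leaf and has_params:
            module.register_forward_pre_hook(upcast_hook)
            module.register_forward_hook(downcast_hook)
    return model


# Usage: store the model in float16, but run each layer's math in float32.
model = nn.Sequential(nn.Linear(8, 8), nn.GELU(), nn.Linear(8, 8)).to(torch.float16)
model = apply_layerwise_upcasting(model, upcast_dtype=torch.float32, downcast_dtype=torch.float16)
out = model(torch.randn(2, 8, dtype=torch.float16))
print(out.dtype)  # activations come out in the upcast dtype (torch.float32)

Keeping weights in float16/bfloat16 and upcasting only the layer currently executing keeps peak memory close to the low-precision footprint while doing the arithmetic in higher precision, which is the trade-off the rename from "dynamic" to "layerwise" upcasting describes more precisely.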