
Commit 3c498ef

apply_faster_cache -> apply_fastercache
1 parent: f3cb80c

5 files changed (+16 −16 lines)

src/diffusers/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -423,7 +423,7 @@
             "WuerstchenCombinedPipeline",
             "WuerstchenDecoderPipeline",
             "WuerstchenPriorPipeline",
-            "apply_faster_cache",
+            "apply_fastercache",
         ]
     )
 
@@ -912,7 +912,7 @@
         WuerstchenCombinedPipeline,
         WuerstchenDecoderPipeline,
         WuerstchenPriorPipeline,
-        apply_faster_cache,
+        apply_fastercache,
     )
 
     try:
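
The user-facing effect of this hunk is that the public entry point is now `apply_fastercache`. A minimal usage sketch, mirroring the docstring example updated later in this commit (assumes a diffusers build containing this change, a CUDA device, and the CogVideoX-5b weights):

```python
import torch
from diffusers import CogVideoXPipeline, FasterCacheConfig, apply_fastercache

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Config values taken from the docstring and tests in this commit.
config = FasterCacheConfig(
    spatial_attention_block_skip_range=2,
    attention_weight_callback=lambda _: 0.3,
    tensor_format="BFCHW",
)
apply_fastercache(pipe, config)

# The old top-level name is removed by this commit:
# from diffusers import apply_faster_cache  # would now raise ImportError
```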

src/diffusers/pipelines/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -58,7 +58,7 @@
         "StableDiffusionMixin",
         "ImagePipelineOutput",
     ]
-    _import_structure["faster_cache_utils"] = ["FasterCacheConfig", "apply_faster_cache"]
+    _import_structure["fastercache_utils"] = ["FasterCacheConfig", "apply_fastercache"]
     _import_structure["deprecated"].extend(
         [
             "PNDMPipeline",
@@ -450,7 +450,7 @@
     from .ddpm import DDPMPipeline
     from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline
     from .dit import DiTPipeline
-    from .faster_cache_utils import FasterCacheConfig, apply_faster_cache
+    from .fastercache_utils import FasterCacheConfig, apply_fastercache
     from .latent_diffusion import LDMSuperResolutionPipeline
     from .pipeline_utils import (
         AudioPipelineOutput,
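
Note that the `_import_structure` keys name submodules, so the key has to track the file rename for the lazy import to resolve (the `+` line above updates it to `fastercache_utils` accordingly). A simplified sketch of the lazy-import pattern using PEP 562 module `__getattr__` (illustrative only; diffusers' actual implementation uses its own `_LazyModule` helper):

```python
# Simplified lazy-import sketch; not diffusers' exact code.
import importlib

_import_structure = {
    "fastercache_utils": ["FasterCacheConfig", "apply_fastercache"],
}

# Invert the mapping: attribute name -> submodule that defines it.
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):
    # Called on first access to e.g. `package.apply_fastercache`; imports
    # ".fastercache_utils" lazily, so the key must match the module filename.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)
```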

src/diffusers/pipelines/faster_cache_utils.py renamed to src/diffusers/pipelines/fastercache_utils.py

Lines changed: 3 additions & 3 deletions
@@ -225,7 +225,7 @@ def reset(self):
         self.is_guidance_distilled = None
 
 
-def apply_faster_cache(
+def apply_fastercache(
     pipeline: DiffusionPipeline,
     config: Optional[FasterCacheConfig] = None,
 ) -> None:
@@ -241,7 +241,7 @@ def apply_faster_cache(
     Example:
     ```python
     >>> import torch
-    >>> from diffusers import CogVideoXPipeline, FasterCacheConfig, apply_faster_cache
+    >>> from diffusers import CogVideoXPipeline, FasterCacheConfig, apply_fastercache
 
     >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
     >>> pipe.to("cuda")
@@ -255,7 +255,7 @@ def apply_faster_cache(
     ...     attention_weight_callback=lambda _: 0.3,
    ...     tensor_format="BFCHW",
     ... )
-    >>> apply_faster_cache(pipe, config)
+    >>> apply_fastercache(pipe, config)
     ```
     """
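
This commit renames the function outright, so existing callers of `apply_faster_cache` break immediately. A common alternative is to keep a deprecated alias for a release cycle; a purely hypothetical shim, NOT part of this commit, could look like:

```python
import warnings

from diffusers import apply_fastercache


def apply_faster_cache(*args, **kwargs):
    # Hypothetical backward-compatibility alias; not included in this commit.
    warnings.warn(
        "`apply_faster_cache` has been renamed to `apply_fastercache`; "
        "the old name may be removed in a future release.",
        FutureWarning,
        stacklevel=2,
    )
    return apply_fastercache(*args, **kwargs)
```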

src/diffusers/utils/dummy_torch_and_transformers_objects.py

Lines changed: 2 additions & 2 deletions
@@ -2552,5 +2552,5 @@ def from_pretrained(cls, *args, **kwargs):
         requires_backends(cls, ["torch", "transformers"])
 
 
-def apply_faster_cache(*args, **kwargs):
-    requires_backends(apply_faster_cache, ["torch", "transformers"])
+def apply_fastercache(*args, **kwargs):
+    requires_backends(apply_fastercache, ["torch", "transformers"])
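
The dummy module provides an import-safe placeholder when `torch`/`transformers` are absent: importing diffusers still succeeds, and the failure is deferred to the call site with an actionable error. A simplified, self-contained sketch of the mechanism (diffusers' real `requires_backends` lives in `diffusers.utils` and is more elaborate):

```python
# Simplified stand-in for the dummy-object mechanism; not diffusers' exact code.
import importlib.util


def requires_backends(obj, backends):
    # Raise a descriptive ImportError if any required backend is not installed.
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")


def apply_fastercache(*args, **kwargs):
    # Placeholder matching the renamed public API; fails only when called.
    requires_backends(apply_fastercache, ["torch", "transformers"])
```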

tests/pipelines/test_pipelines_common.py

Lines changed: 7 additions & 7 deletions
@@ -28,7 +28,7 @@
     StableDiffusionPipeline,
     StableDiffusionXLPipeline,
     UNet2DConditionModel,
-    apply_faster_cache,
+    apply_fastercache,
 )
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.loaders import FluxIPAdapterMixin, IPAdapterMixin
@@ -37,7 +37,7 @@
 from diffusers.models.unets.unet_3d_condition import UNet3DConditionModel
 from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLUNet
 from diffusers.models.unets.unet_motion_model import UNetMotionModel
-from diffusers.pipelines.faster_cache_utils import FasterCacheBlockHook, FasterCacheDenoiserHook
+from diffusers.pipelines.fastercache_utils import FasterCacheBlockHook, FasterCacheDenoiserHook
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import logging
@@ -2291,21 +2291,21 @@ def test_fastercache_basic_warning_or_errors_raised(self):
         # Check if warning is raised when no FasterCacheConfig is provided
         pipe = self.pipeline_class(**components)
         with CaptureLogger(logger) as cap_logger:
-            apply_faster_cache(pipe)
+            apply_fastercache(pipe)
         self.assertTrue("No FasterCacheConfig provided" in cap_logger.out)
 
         # Check if warning is raised when no attention_weight_callback is provided
         pipe = self.pipeline_class(**components)
         with CaptureLogger(logger) as cap_logger:
             config = FasterCacheConfig(spatial_attention_block_skip_range=2, attention_weight_callback=None)
-            apply_faster_cache(pipe, config)
+            apply_fastercache(pipe, config)
         self.assertTrue("No `attention_weight_callback` provided when enabling FasterCache" in cap_logger.out)
 
         # Check if error is raised when an unsupported tensor format is used
         pipe = self.pipeline_class(**components)
         with self.assertRaises(ValueError):
             config = FasterCacheConfig(spatial_attention_block_skip_range=2, tensor_format="BFHWC")
-            apply_faster_cache(pipe, config)
+            apply_fastercache(pipe, config)
 
     def test_fastercache_inference(self, expected_atol: float = 0.1):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -2321,7 +2321,7 @@ def test_fastercache_inference(self, expected_atol: float = 0.1):
         original_image_slice = output.flatten()
         original_image_slice = np.concatenate((original_image_slice[:8], original_image_slice[-8:]))
 
-        apply_faster_cache(pipe, self.fastercache_config)
+        apply_fastercache(pipe, self.fastercache_config)
 
         inputs = self.get_dummy_inputs(device)
         inputs["num_inference_steps"] = 4
@@ -2353,7 +2353,7 @@ def test_fastercache_state(self):
         pipe = self.pipeline_class(**components)
         pipe.set_progress_bar_config(disable=None)
 
-        apply_faster_cache(pipe, self.fastercache_config)
+        apply_fastercache(pipe, self.fastercache_config)
 
         expected_hooks = 0
         if self.fastercache_config.spatial_attention_block_skip_range is not None:
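
The renamed call sites above all follow the same capture-and-assert pattern around `apply_fastercache`. A generic, standard-library analogue of the warning check (`CaptureLogger` is a diffusers test helper; `unittest`'s `assertLogs` plays the same role in this sketch):

```python
import logging
import unittest


class WarningPatternExample(unittest.TestCase):
    def test_warning_message_is_captured(self):
        logger = logging.getLogger("diffusers.example")
        # assertLogs collects records emitted inside the block, like CaptureLogger.
        with self.assertLogs(logger, level="WARNING") as captured:
            logger.warning("No FasterCacheConfig provided")
        self.assertTrue(any("No FasterCacheConfig provided" in m for m in captured.output))


if __name__ == "__main__":
    unittest.main()
```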
