Skip to content

Commit 4e2cc1c

Browse files
committed
update
1 parent e32cb31 commit 4e2cc1c

File tree

2 files changed

+13
-4
lines changed

2 files changed

+13
-4
lines changed

src/diffusers/loaders/lora_base.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -465,7 +465,6 @@ class LoraBaseMixin:
465465
"""Utility class for handling LoRAs."""
466466

467467
_lora_loadable_modules = []
468-
_merged_adapters = set()
469468

470469
def load_lora_weights(self, **kwargs):
471470
raise NotImplementedError("`load_lora_weights()` is not implemented.")
@@ -505,6 +504,12 @@ def _best_guess_weight_name(cls, *args, **kwargs):
505504
deprecate("_best_guess_weight_name", "0.35.0", deprecation_message)
506505
return _best_guess_weight_name(*args, **kwargs)
507506

507+
@property
508+
def _merged_adapters(self):
509+
if "_merged_adapters" not in self.__dict__:
510+
self.__dict__["_merged_adapters"] = set()
511+
return self.__dict__["_merged_adapters"]
512+
508513
def unload_lora_weights(self):
509514
"""
510515
Unloads the LoRA parameters.

tests/lora/utils.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -877,11 +877,15 @@ def test_simple_inference_with_text_denoiser_lora_unfused(
877877
pipe, denoiser = self.check_if_adapters_added_correctly(pipe, text_lora_config, denoiser_lora_config)
878878

879879
pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules)
880-
assert pipe.num_fused_loras == 1, pipe.num_fused_loras
880+
self.assertTrue(
881+
pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}"
882+
)
881883
output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
882884

883885
pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
884-
assert pipe.num_fused_loras == 0, pipe.num_fused_loras
886+
self.assertTrue(
887+
pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}"
888+
)
885889
output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
886890

887891
# unloading should remove the LoRA layers
@@ -1703,7 +1707,7 @@ def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expec
17031707
adapter_names=["adapter-1"],
17041708
lora_scale=lora_scale,
17051709
)
1706-
assert pipe.num_fused_loras == 1, pipe.num_fused_loras
1710+
self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
17071711

17081712
outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
17091713

0 commit comments

Comments (0)