
Commit fdbe06d

Remove `# Copied from` annotations from `fuse_lora` and `unfuse_lora` as needed.
1 parent: 72e4aee

1 file changed: +9 −32 lines

src/diffusers/loaders/lora_pipeline.py

Lines changed: 9 additions & 32 deletions
@@ -1797,7 +1797,7 @@ def save_lora_weights(
 
     def fuse_lora(
         self,
-        components: List[str] = ["transformer", "text_encoder"],
+        components: List[str] = ["transformer"],
         lora_scale: float = 1.0,
         safe_fusing: bool = False,
         adapter_names: Optional[List[str]] = None,
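The functional change, repeated across every class this diff touches, is visible in the hunk above: the default `components` for `fuse_lora` drops `"text_encoder"`, so by default only the transformer's LoRA layers are fused. A minimal usage sketch, assuming a transformer-based pipeline such as Flux; the LoRA repo name is a placeholder:

import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights("some-user/some-lora")  # placeholder LoRA repo

# New default is components=["transformer"]; the text encoder is left untouched.
pipe.fuse_lora(lora_scale=1.0)  # same as pipe.fuse_lora(components=["transformer"], lora_scale=1.0)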
@@ -2499,10 +2499,9 @@ def save_lora_weights(
             safe_serialization=safe_serialization,
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer
     def fuse_lora(
         self,
-        components: List[str] = ["transformer", "text_encoder"],
+        components: List[str] = ["transformer"],
         lora_scale: float = 1.0,
         safe_fusing: bool = False,
         adapter_names: Optional[List[str]] = None,
@@ -2543,8 +2542,7 @@ def fuse_lora(
             components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.unfuse_lora with unet->transformer
-    def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
+    def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         r"""
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
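`unfuse_lora` receives the matching change: its default `components` also becomes `["transformer"]`, and the now-irrelevant `unfuse_text_encoder` docstring entry is deleted in the hunk below. A sketch of the round trip, continuing the hypothetical pipeline above:

pipe.fuse_lora()                            # fuse LoRA into the transformer (new default)
image = pipe("a photo of a cat").images[0]  # inference runs on the fused weights
pipe.unfuse_lora()                          # reverses fuse_lora; also defaults to ["transformer"]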
@@ -2558,9 +2556,6 @@ def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
             unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
-            unfuse_text_encoder (`bool`, defaults to `True`):
-                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
-                LoRA parameters then it won't have any effect.
         """
         super().unfuse_lora(components=components)
 
@@ -2807,10 +2802,9 @@ def save_lora_weights(
             safe_serialization=safe_serialization,
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer
     def fuse_lora(
         self,
-        components: List[str] = ["transformer", "text_encoder"],
+        components: List[str] = ["transformer"],
         lora_scale: float = 1.0,
         safe_fusing: bool = False,
         adapter_names: Optional[List[str]] = None,
@@ -2851,8 +2845,7 @@ def fuse_lora(
             components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.unfuse_lora with unet->transformer
-    def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
+    def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         r"""
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
@@ -2866,9 +2859,6 @@ def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
             unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
-            unfuse_text_encoder (`bool`, defaults to `True`):
-                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
-                LoRA parameters then it won't have any effect.
         """
         super().unfuse_lora(components=components)
 
@@ -3115,10 +3105,9 @@ def save_lora_weights(
             safe_serialization=safe_serialization,
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer
     def fuse_lora(
         self,
-        components: List[str] = ["transformer", "text_encoder"],
+        components: List[str] = ["transformer"],
         lora_scale: float = 1.0,
         safe_fusing: bool = False,
         adapter_names: Optional[List[str]] = None,
@@ -3159,8 +3148,7 @@ def fuse_lora(
             components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.unfuse_lora with unet->transformer
-    def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
+    def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         r"""
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
@@ -3174,9 +3162,6 @@ def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
             unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
-            unfuse_text_encoder (`bool`, defaults to `True`):
-                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
-                LoRA parameters then it won't have any effect.
         """
         super().unfuse_lora(components=components)
 
@@ -3423,10 +3408,9 @@ def save_lora_weights(
             safe_serialization=safe_serialization,
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer
     def fuse_lora(
         self,
-        components: List[str] = ["transformer", "text_encoder"],
+        components: List[str] = ["transformer"],
         lora_scale: float = 1.0,
         safe_fusing: bool = False,
         adapter_names: Optional[List[str]] = None,
@@ -3467,8 +3451,7 @@ def fuse_lora(
             components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
        )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.unfuse_lora with unet->transformer
-    def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
+    def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         r"""
         Reverses the effect of
         [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
@@ -3482,9 +3465,6 @@ def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs):
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
             unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
-            unfuse_text_encoder (`bool`, defaults to `True`):
-                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
-                LoRA parameters then it won't have any effect.
         """
         super().unfuse_lora(components=components)
 
@@ -3791,9 +3771,6 @@ def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         Args:
             components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
             unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
-            unfuse_text_encoder (`bool`, defaults to `True`):
-                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
-                LoRA parameters then it won't have any effect.
         """
         super().unfuse_lora(components=components)
 
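Context for the commit message: diffusers enforces `# Copied from` annotations through its copy-consistency tooling (`make fix-copies`, backed by `utils/check_copies.py`), which rewrites an annotated block to match the function the annotation points to. `StableDiffusionLoraLoaderMixin.fuse_lora` keeps `["unet", "text_encoder"]` as its default, so once these transformer pipelines dropped `"text_encoder"` from theirs, the derived methods no longer matched their source and the annotations had to be removed alongside the signature change.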