
Commit ebfe343

[from_single_file] Fix circular import (#4259)
* up
* finish
* fix final
1 parent 5ef6b8f commit ebfe343
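The change below breaks the circular import by deferring `loaders.py`'s module-level imports of `.models.attention_processor` into the functions and methods that actually use them (the other edge of the cycle is not visible in this diff). As a minimal, self-contained sketch of why a deferred import breaks such a cycle, using hypothetical module names (`demo_pkg`, not the real diffusers layout):

```python
import pathlib
import sys
import tempfile
import textwrap

# Build a throwaway two-module package on disk.
pkg_dir = pathlib.Path(tempfile.mkdtemp()) / "demo_pkg"
pkg_dir.mkdir()
(pkg_dir / "__init__.py").write_text("")

# demo_pkg/attention_processor.py needs something from demo_pkg/loaders.py at import time.
(pkg_dir / "attention_processor.py").write_text(textwrap.dedent("""
    from demo_pkg.loaders import PatchedLoraProjection  # one edge of the cycle

    class LoRALinearLayer:
        pass
"""))

# demo_pkg/loaders.py mirrors the "after" state of this commit: it imports
# attention_processor only inside the method that needs it, so importing either
# module no longer pulls in the other while it is still half-initialized.
(pkg_dir / "loaders.py").write_text(textwrap.dedent("""
    class PatchedLoraProjection:
        def __init__(self):
            # Deferred import: by the time __init__ runs, both modules exist.
            from demo_pkg.attention_processor import LoRALinearLayer
            self.lora_layer = LoRALinearLayer()
"""))

sys.path.insert(0, str(pkg_dir.parent))
import demo_pkg.attention_processor  # succeeds with the deferred import in loaders.py

print(demo_pkg.attention_processor.LoRALinearLayer)
print(demo_pkg.loaders.PatchedLoraProjection().lora_layer)
```

If loaders.py instead imported attention_processor at module level, the `import demo_pkg.attention_processor` above would fail with an ImportError complaining about a partially initialized module.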

File tree: 2 files changed (+35, -16 lines)


src/diffusers/loaders.py

Lines changed: 29 additions & 16 deletions
@@ -25,22 +25,6 @@
 from huggingface_hub import hf_hub_download
 from torch import nn
 
-from .models.attention_processor import (
-    LORA_ATTENTION_PROCESSORS,
-    AttnAddedKVProcessor,
-    AttnAddedKVProcessor2_0,
-    AttnProcessor,
-    AttnProcessor2_0,
-    CustomDiffusionAttnProcessor,
-    CustomDiffusionXFormersAttnProcessor,
-    LoRAAttnAddedKVProcessor,
-    LoRAAttnProcessor,
-    LoRAAttnProcessor2_0,
-    LoRALinearLayer,
-    LoRAXFormersAttnProcessor,
-    SlicedAttnAddedKVProcessor,
-    XFormersAttnProcessor,
-)
 from .utils import (
     DIFFUSERS_CACHE,
     HF_HUB_OFFLINE,

@@ -83,6 +67,8 @@
 class PatchedLoraProjection(nn.Module):
     def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
         super().__init__()
+        from .models.attention_processor import LoRALinearLayer
+
         self.regular_linear_layer = regular_linear_layer
 
         device = self.regular_linear_layer.weight.device

@@ -231,6 +217,17 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
         information.
 
         """
+        from .models.attention_processor import (
+            AttnAddedKVProcessor,
+            AttnAddedKVProcessor2_0,
+            CustomDiffusionAttnProcessor,
+            LoRAAttnAddedKVProcessor,
+            LoRAAttnProcessor,
+            LoRAAttnProcessor2_0,
+            LoRAXFormersAttnProcessor,
+            SlicedAttnAddedKVProcessor,
+            XFormersAttnProcessor,
+        )
 
         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
         force_download = kwargs.pop("force_download", False)

@@ -423,6 +420,11 @@ def save_attn_procs(
                 `DIFFUSERS_SAVE_MODE`.
 
         """
+        from .models.attention_processor import (
+            CustomDiffusionAttnProcessor,
+            CustomDiffusionXFormersAttnProcessor,
+        )
+
         weight_name = weight_name or deprecate(
             "weights_name",
             "0.20.0",

@@ -1317,6 +1319,17 @@ def unload_lora_weights(self):
         >>> ...
         ```
         """
+        from .models.attention_processor import (
+            LORA_ATTENTION_PROCESSORS,
+            AttnProcessor,
+            AttnProcessor2_0,
+            LoRAAttnAddedKVProcessor,
+            LoRAAttnProcessor,
+            LoRAAttnProcessor2_0,
+            LoRAXFormersAttnProcessor,
+            XFormersAttnProcessor,
+        )
+
         unet_attention_classes = {type(processor) for _, processor in self.unet.attn_processors.items()}
 
         if unet_attention_classes.issubset(LORA_ATTENTION_PROCESSORS):
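A side note on the trade-off behind this pattern, not stated in the commit: a function-local import re-executes on every call, but after the first execution the module lives in `sys.modules`, so each repeated import is roughly a dict lookup plus a name binding. A quick self-contained check, using `json` as a stand-in for the deferred `attention_processor` module:

```python
import sys
import timeit

def deferred():
    # After the first call the module is cached in sys.modules, so a repeated
    # function-local import is essentially a dict lookup plus a name binding.
    import json  # stand-in for the deferred attention_processor import
    return json

deferred()
print("json" in sys.modules)                    # True: imported and cached once
print(timeit.timeit(deferred, number=100_000))  # repeated deferred imports stay cheap
```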

src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py

Lines changed: 6 additions & 0 deletions
@@ -799,6 +799,9 @@ def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder
         for param_name, param in text_model_dict.items():
             set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
     else:
+        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)):
+            text_model_dict.pop("text_model.embeddings.position_ids", None)
+
         text_model.load_state_dict(text_model_dict)
 
     return text_model

@@ -960,6 +963,9 @@ def convert_open_clip_checkpoint(
         for param_name, param in text_model_dict.items():
             set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
     else:
+        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)):
+            text_model_dict.pop("text_model.embeddings.position_ids", None)
+
         text_model.load_state_dict(text_model_dict)
 
     return text_model
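One detail worth flagging: as shown, both added guards call `hasattr` with a single argument (`hasattr(text_model.embeddings.position_ids)`), while `hasattr` expects an object and an attribute name. The sketch below shows the presumably intended check as a hypothetical helper (the name `_strip_stale_position_ids` is not from the commit); the apparent intent is to drop the `position_ids` entry from the checkpoint state dict when the instantiated CLIP text model no longer registers that buffer, so that `load_state_dict` does not fail on an unexpected key:

```python
def _strip_stale_position_ids(text_model, text_model_dict):
    """Hypothetical helper: drop text_model.embeddings.position_ids from a
    checkpoint state dict when the target model does not expose that buffer."""
    has_position_ids = hasattr(text_model, "embeddings") and hasattr(
        text_model.embeddings, "position_ids"
    )
    if not has_position_ids:
        text_model_dict.pop("text_model.embeddings.position_ids", None)
    return text_model_dict
```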

0 commit comments
