
Commit 8267677

start folderizing the loaders.
1 parent ce1063a commit 8267677

16 files changed: +79 −48 lines


src/diffusers/loaders/__init__.py

Lines changed: 7 additions & 10 deletions
@@ -54,7 +54,7 @@ def text_encoder_attn_modules(text_encoder):
 _import_structure = {}

 if is_torch_available():
-    _import_structure["single_file_model"] = ["FromOriginalModelMixin"]
+    _import_structure["single_file.single_file_model"] = ["FromOriginalModelMixin"]
     _import_structure["transformer_flux"] = ["FluxTransformer2DLoadersMixin"]
     _import_structure["transformer_sd3"] = ["SD3Transformer2DLoadersMixin"]
     _import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
@@ -77,6 +77,7 @@ def text_encoder_attn_modules(text_encoder):
         "SanaLoraLoaderMixin",
         "Lumina2LoraLoaderMixin",
         "WanLoraLoaderMixin",
+        "LoraBaseMixin",
     ]
     _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
     _import_structure["ip_adapter"] = [
@@ -90,25 +91,21 @@ def text_encoder_attn_modules(text_encoder):

 if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     if is_torch_available():
-        from .single_file_model import FromOriginalModelMixin
-        from .transformer_flux import FluxTransformer2DLoadersMixin
-        from .transformer_sd3 import SD3Transformer2DLoadersMixin
+        from .ip_adapter import FluxTransformer2DLoadersMixin, SD3Transformer2DLoadersMixin
+        from .single_file import FromOriginalModelMixin
         from .unet import UNet2DConditionLoadersMixin
         from .utils import AttnProcsLayers

     if is_transformers_available():
-        from .ip_adapter import (
-            FluxIPAdapterMixin,
-            IPAdapterMixin,
-            SD3IPAdapterMixin,
-        )
-        from .lora_pipeline import (
+        from .ip_adapter import FluxIPAdapterMixin, IPAdapterMixin, SD3IPAdapterMixin
+        from .lora import (
             AmusedLoraLoaderMixin,
             AuraFlowLoraLoaderMixin,
             CogVideoXLoraLoaderMixin,
             CogView4LoraLoaderMixin,
             FluxLoraLoaderMixin,
             HunyuanVideoLoraLoaderMixin,
+            LoraBaseMixin,
             LoraLoaderMixin,
             LTXVideoLoraLoaderMixin,
             Lumina2LoraLoaderMixin,
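
A note on the key change above: the `_import_structure` keys are submodule paths relative to `diffusers.loaders` that are consumed lazily, so renaming the key from "single_file_model" to "single_file.single_file_model" points the loader at the new folder while the exported name stays `FromOriginalModelMixin`. Below is a minimal sketch of how such a mapping can be resolved on first access; it is a simplified stand-in for the library's lazy-module machinery, not its actual code.

import importlib

# Simplified stand-in for a lazy re-export table: keys are dotted submodule
# paths relative to the package, values are the names exported from them.
_import_structure = {
    "single_file.single_file_model": ["FromOriginalModelMixin"],
    "unet": ["UNet2DConditionLoadersMixin"],
}


def resolve(package, structure, name):
    """Import the submodule that defines `name` and return that attribute."""
    for submodule, exported in structure.items():
        if name in exported:
            module = importlib.import_module(f"{package}.{submodule}")
            return getattr(module, name)
    raise AttributeError(f"{package} has no lazy export {name!r}")


# e.g. resolve("diffusers.loaders", _import_structure, "FromOriginalModelMixin")
# still returns the mixin after the move: only the key's module path changed,
# not the exported name.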
src/diffusers/loaders/ip_adapter/__init__.py (new file)

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+from ...utils.import_utils import is_torch_available, is_transformers_available
+
+
+if is_torch_available():
+    from .transformer_flux import FluxTransformer2DLoadersMixin
+    from .transformer_sd3 import SD3Transformer2DLoadersMixin
+
+if is_transformers_available():
+    from .ip_adapter import FluxIPAdapterMixin, IPAdapterMixin, SD3IPAdapterMixin
File renamed without changes.

src/diffusers/loaders/transformer_flux.py renamed to src/diffusers/loaders/ip_adapter/transformer_flux.py

Lines changed: 4 additions & 16 deletions
@@ -13,21 +13,11 @@
 # limitations under the License.
 from contextlib import nullcontext

-from ..models.embeddings import (
-    ImageProjection,
-    MultiIPAdapterImageProjection,
-)
-from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
-from ..utils import (
-    is_accelerate_available,
-    is_torch_version,
-    logging,
-)
+from ...models.embeddings import ImageProjection, MultiIPAdapterImageProjection
+from ...models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
+from ...utils import is_accelerate_available, is_torch_version, logging


-if is_accelerate_available():
-    pass
-
 logger = logging.get_logger(__name__)


@@ -88,9 +78,7 @@ def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict, low_cpu_mem_us
         return image_projection

     def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=_LOW_CPU_MEM_USAGE_DEFAULT):
-        from ..models.attention_processor import (
-            FluxIPAdapterJointAttnProcessor2_0,
-        )
+        from ...models.attention_processor import FluxIPAdapterJointAttnProcessor2_0

         if low_cpu_mem_usage:
             if is_accelerate_available():
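
The only substantive edit in these renamed modules is the relative-import depth: the file now sits in loaders/ip_adapter/ rather than loaders/, so `..models` becomes `...models` (the dead `if is_accelerate_available(): pass` block is also dropped). Below is a small self-contained sketch of the dot arithmetic, using a made-up `pkg` layout rather than the diffusers tree.

import importlib
import pathlib
import sys
import tempfile

# Hypothetical mini-package mirroring the move: the loader module sits two
# levels below the package root, so reaching pkg.models takes three dots
# (one dot = current package ip_adapter, two = loaders, three = pkg).
files = {
    "pkg/__init__.py": "",
    "pkg/models/__init__.py": "",
    "pkg/models/embeddings.py": "class ImageProjection:\n    pass\n",
    "pkg/loaders/__init__.py": "",
    "pkg/loaders/ip_adapter/__init__.py": "",
    "pkg/loaders/ip_adapter/transformer_flux.py": (
        "from ...models.embeddings import ImageProjection\n"
    ),
}

root = tempfile.mkdtemp()
for rel, body in files.items():
    path = pathlib.Path(root, rel)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(body)

sys.path.insert(0, root)
module = importlib.import_module("pkg.loaders.ip_adapter.transformer_flux")
print(module.ImageProjection)  # resolved through the three-dot relative import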

src/diffusers/loaders/transformer_sd3.py renamed to src/diffusers/loaders/ip_adapter/transformer_sd3.py

Lines changed: 4 additions & 4 deletions
@@ -14,10 +14,10 @@
 from contextlib import nullcontext
 from typing import Dict

-from ..models.attention_processor import SD3IPAdapterJointAttnProcessor2_0
-from ..models.embeddings import IPAdapterTimeImageProjection
-from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
-from ..utils import is_accelerate_available, is_torch_version, logging
+from ...models.attention_processor import SD3IPAdapterJointAttnProcessor2_0
+from ...models.embeddings import IPAdapterTimeImageProjection
+from ...models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
+from ...utils import is_accelerate_available, is_torch_version, logging


 logger = logging.get_logger(__name__)
src/diffusers/loaders/lora/__init__.py (new file)

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+from ...utils import is_peft_available, is_torch_available, is_transformers_available
+
+
+if is_torch_available():
+    from .lora_base import LoraBaseMixin
+
+if is_transformers_available():
+    from .lora_pipeline import (
+        AmusedLoraLoaderMixin,
+        AuraFlowLoraLoaderMixin,
+        CogVideoXLoraLoaderMixin,
+        CogView4LoraLoaderMixin,
+        FluxLoraLoaderMixin,
+        HunyuanVideoLoraLoaderMixin,
+        LoraLoaderMixin,
+        LTXVideoLoraLoaderMixin,
+        Lumina2LoraLoaderMixin,
+        Mochi1LoraLoaderMixin,
+        SanaLoraLoaderMixin,
+        SD3LoraLoaderMixin,
+        StableDiffusionLoraLoaderMixin,
+        StableDiffusionXLLoraLoaderMixin,
+        WanLoraLoaderMixin,
+    )
File renamed without changes.
File renamed without changes.
src/diffusers/loaders/single_file/__init__.py (new file)

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+from ...utils import is_torch_available, is_transformers_available
+
+
+if is_torch_available():
+    from .single_file_model import FromOriginalModelMixin
+
+if is_transformers_available():
+    from .single_file import FromSingleFileMixin
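
Taken together, the three new `__init__.py` files re-export the moved mixins from their subpackages, while the lazy table in the top-level `diffusers.loaders` keeps the old import surface working. A short usage sketch, assuming a diffusers checkout that includes this commit with torch and transformers installed:

# Assumes a checkout containing this commit, with torch and transformers
# installed; the paths below follow the new folder layout.
from diffusers.loaders import FromOriginalModelMixin           # via the lazy _import_structure key
from diffusers.loaders.ip_adapter import FluxIPAdapterMixin    # re-exported by ip_adapter/__init__.py
from diffusers.loaders.lora import LoraBaseMixin               # re-exported by lora/__init__.py
from diffusers.loaders.single_file import FromSingleFileMixin  # re-exported by single_file/__init__.py

print(FromOriginalModelMixin, FluxIPAdapterMixin, LoraBaseMixin, FromSingleFileMixin)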
