
Commit 6c0d55d

Commit message: up
1 parent db38c47

This commit collapses the multi-line attention-processor tuple in upcast_vae onto a single line across seven pipelines. In the three pipelines that were missing it (pipeline_pag_kolors.py, pipeline_pag_sd_xl.py, pipeline_stable_diffusion_xl_k_diffusion.py), it also adds FusedAttnProcessor2_0 to both the import and the isinstance check.
7 files changed: +10 -41 lines changed

src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py

Lines changed: 1 addition & 5 deletions

@@ -410,11 +410,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
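
For context, this check lives inside upcast_vae, which moves the VAE to float32 before decoding and then, when one of these processors is active, drops the attention-heavy blocks back to the original dtype. A minimal sketch of the full method, reconstructed from the context lines above; the dtype bookkeeping before and after the check follows the usual diffusers pattern and is not shown in this diff:

# Sketch of the surrounding method. Only the middle lines appear in this
# diff; the if-branch at the end is assumed from the common diffusers
# pattern.
def upcast_vae(self):
    dtype = self.vae.dtype
    self.vae.to(dtype=torch.float32)
    use_torch_2_0_or_xformers = isinstance(
        self.vae.decoder.mid_block.attentions[0].processor,
        (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
    )
    # these processors handle the input dtype internally, so the attention
    # blocks can stay out of float32, which saves a lot of memory
    if use_torch_2_0_or_xformers:
        self.vae.post_quant_conv.to(dtype)
        self.vae.decoder.conv_in.to(dtype)
        self.vae.decoder.mid_block.to(dtype)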

src/diffusers/pipelines/kolors/pipeline_kolors.py

Lines changed: 1 addition & 5 deletions

@@ -584,11 +584,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory

src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py

Lines changed: 1 addition & 5 deletions

@@ -712,11 +712,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory

src/diffusers/pipelines/pag/pipeline_pag_kolors.py

Lines changed: 2 additions & 5 deletions

@@ -21,7 +21,7 @@
 from ...image_processor import PipelineImageInput, VaeImageProcessor
 from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin
 from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
+from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import is_torch_xla_available, logging, replace_example_docstring
 from ...utils.torch_utils import randn_tensor

@@ -602,10 +602,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
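
Note that for this file the second hunk is a behavior change, not just a reflow: FusedAttnProcessor2_0 was previously absent from the tuple, so a pipeline whose VAE attention runs with a fused processor would fail the check and keep the entire VAE in float32. An illustrative before/after comparison; the fuse_qkv_projections() call is a hypothetical way to end up with a fused processor, not something this commit touches. The same fix applies to the two pipelines below.

# Hypothetical setup: a pipeline whose VAE ended up with
# FusedAttnProcessor2_0, e.g. after pipe.fuse_qkv_projections()
proc = pipe.vae.decoder.mid_block.attentions[0].processor
isinstance(proc, (AttnProcessor2_0, XFormersAttnProcessor))
# -> False before this commit: the fused processor missed the fast path
isinstance(proc, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0))
# -> True after: the memory-saving branch applies again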

src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py

Lines changed: 2 additions & 8 deletions

@@ -32,10 +32,7 @@
     TextualInversionLoaderMixin,
 )
 from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from ...models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
+from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers
 from ...utils import (

@@ -765,10 +762,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory

src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py

Lines changed: 2 additions & 8 deletions

@@ -33,10 +33,7 @@
     TextualInversionLoaderMixin,
 )
 from ...models import AutoencoderKL, UNet2DConditionModel
-from ...models.attention_processor import (
-    AttnProcessor2_0,
-    XFormersAttnProcessor,
-)
+from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
 from ...models.lora import adjust_lora_scale_text_encoder
 from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
 from ...utils import (

@@ -548,10 +545,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
        )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory

src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py

Lines changed: 1 addition & 5 deletions

@@ -461,11 +461,7 @@ def upcast_vae(self):
         self.vae.to(dtype=torch.float32)
         use_torch_2_0_or_xformers = isinstance(
             self.vae.decoder.mid_block.attentions[0].processor,
-            (
-                AttnProcessor2_0,
-                XFormersAttnProcessor,
-                FusedAttnProcessor2_0,
-            ),
+            (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0),
         )
         # if xformers or torch_2_0 is used attention block does not need
         # to be in float32 which can save lots of memory
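
After this commit, all seven files share the same one-line tuple. For callers, upcast_vae is typically paired with the decode step; a hedged usage sketch following the common SDXL decoding pattern (needs_upcasting is an illustrative local name, and force_upcast is the standard AutoencoderKL config flag; neither comes from this commit):

# Illustrative caller-side pattern, not part of this diff
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
    self.upcast_vae()
    # match latents to the now-float32 VAE parameters
    latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
if needs_upcasting:
    # restore half precision for the rest of the pipeline
    self.vae.to(dtype=torch.float16)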
