Skip to content

Commit aa1abb0

Browse files
authored
Merge branch 'main' into flux
2 parents 20614bd + 01780c3 commit aa1abb0

File tree

6 files changed

+51
-12
lines changed

6 files changed

+51
-12
lines changed

docs/source/en/api/pipelines/hunyuan_video.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121
<Tip>
2222

23-
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
23+
Make sure to check out the Schedulers [guide](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
2424

2525
</Tip>
2626

docs/source/en/api/pipelines/hunyuandit.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ HunyuanDiT has the following components:
3030

3131
<Tip>
3232

33-
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers.md) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading.md#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
33+
Make sure to check out the Schedulers [guide](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
3434

3535
</Tip>
3636

src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,18 @@
2121
from ...models import AuraFlowTransformer2DModel, AutoencoderKL
2222
from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
2323
from ...schedulers import FlowMatchEulerDiscreteScheduler
24-
from ...utils import logging, replace_example_docstring
24+
from ...utils import is_torch_xla_available, logging, replace_example_docstring
2525
from ...utils.torch_utils import randn_tensor
2626
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
2727

2828

29+
if is_torch_xla_available():
30+
import torch_xla.core.xla_model as xm
31+
32+
XLA_AVAILABLE = True
33+
else:
34+
XLA_AVAILABLE = False
35+
2936
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
3037

3138

@@ -564,6 +571,9 @@ def __call__(
564571
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
565572
progress_bar.update()
566573

574+
if XLA_AVAILABLE:
575+
xm.mark_step()
576+
567577
if output_type == "latent":
568578
image = latents
569579
else:

src/diffusers/pipelines/sana/pipeline_sana.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
USE_PEFT_BACKEND,
3232
is_bs4_available,
3333
is_ftfy_available,
34+
is_torch_xla_available,
3435
logging,
3536
replace_example_docstring,
3637
scale_lora_layers,
@@ -46,6 +47,13 @@
4647
from .pipeline_output import SanaPipelineOutput
4748

4849

50+
if is_torch_xla_available():
51+
import torch_xla.core.xla_model as xm
52+
53+
XLA_AVAILABLE = True
54+
else:
55+
XLA_AVAILABLE = False
56+
4957
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
5058

5159
if is_bs4_available():
@@ -864,6 +872,9 @@ def __call__(
864872
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
865873
progress_bar.update()
866874

875+
if XLA_AVAILABLE:
876+
xm.mark_step()
877+
867878
if output_type == "latent":
868879
image = latents
869880
else:

src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -240,12 +240,21 @@ def __init__(
240240
transformer=transformer,
241241
scheduler=scheduler,
242242
)
243-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
243+
self.vae_scale_factor = (
244+
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
245+
)
246+
latent_channels = self.vae.config.latent_channels if hasattr(self, "vae") and self.vae is not None else 16
244247
self.image_processor = VaeImageProcessor(
245-
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels
248+
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=latent_channels
249+
)
250+
self.tokenizer_max_length = (
251+
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
252+
)
253+
self.default_sample_size = (
254+
self.transformer.config.sample_size
255+
if hasattr(self, "transformer") and self.transformer is not None
256+
else 128
246257
)
247-
self.tokenizer_max_length = self.tokenizer.model_max_length
248-
self.default_sample_size = self.transformer.config.sample_size
249258
self.patch_size = (
250259
self.transformer.config.patch_size if hasattr(self, "transformer") and self.transformer is not None else 2
251260
)

src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -239,19 +239,28 @@ def __init__(
239239
transformer=transformer,
240240
scheduler=scheduler,
241241
)
242-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
242+
self.vae_scale_factor = (
243+
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
244+
)
245+
latent_channels = self.vae.config.latent_channels if hasattr(self, "vae") and self.vae is not None else 16
243246
self.image_processor = VaeImageProcessor(
244-
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels
247+
vae_scale_factor=self.vae_scale_factor, vae_latent_channels=latent_channels
245248
)
246249
self.mask_processor = VaeImageProcessor(
247250
vae_scale_factor=self.vae_scale_factor,
248-
vae_latent_channels=self.vae.config.latent_channels,
251+
vae_latent_channels=latent_channels,
249252
do_normalize=False,
250253
do_binarize=True,
251254
do_convert_grayscale=True,
252255
)
253-
self.tokenizer_max_length = self.tokenizer.model_max_length
254-
self.default_sample_size = self.transformer.config.sample_size
256+
self.tokenizer_max_length = (
257+
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
258+
)
259+
self.default_sample_size = (
260+
self.transformer.config.sample_size
261+
if hasattr(self, "transformer") and self.transformer is not None
262+
else 128
263+
)
255264
self.patch_size = (
256265
self.transformer.config.patch_size if hasattr(self, "transformer") and self.transformer is not None else 2
257266
)

0 commit comments

Comments (0)