12 changes: 12 additions & 0 deletions src/diffusers/pipelines/allegro/pipeline_allegro.py
@@ -33,6 +33,7 @@
    deprecate,
    is_bs4_available,
    is_ftfy_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
)
@@ -41,6 +42,14 @@
from .pipeline_output import AllegroPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


logger = logging.get_logger(__name__)

if is_bs4_available():
@@ -921,6 +930,9 @@ def __call__(
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
    progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

if not output_type == "latent":
    latents = latents.to(self.vae.dtype)
    video = self.decode_latents(latents)
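Every file in this PR receives the same two-part change shown above: a guarded module-level `torch_xla` import, and one `xm.mark_step()` at the end of each denoising iteration. A minimal sketch of the pattern, assuming a schematic `denoise` loop (the function and its arguments are illustrative, not taken from the diff):

```py
# Minimal sketch of the pattern this PR adds to every pipeline: guard the
# torch_xla import at module scope, then cut the lazy XLA graph once per step
# so each denoising step compiles and runs as its own unit.
from diffusers.utils import is_torch_xla_available

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


def denoise(unet, scheduler, latents, timesteps):
    for t in timesteps:
        noise_pred = unet(latents, t).sample  # schematic model call
        latents = scheduler.step(noise_pred, t, latents).prev_sample
        if XLA_AVAILABLE:
            xm.mark_step()  # flush the pending XLA graph for this step
    return latents
```

On non-XLA installs the guard keeps `torch_xla` unimported, and the per-step branch is just a cheap boolean check.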
13 changes: 12 additions & 1 deletion src/diffusers/pipelines/amused/pipeline_amused.py
@@ -20,10 +20,18 @@
from ...image_processor import VaeImageProcessor
from ...models import UVit2DModel, VQModel
from ...schedulers import AmusedScheduler
-from ...utils import replace_example_docstring
+from ...utils import is_torch_xla_available, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -299,6 +307,9 @@ def __call__(
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, timestep, latents)

if XLA_AVAILABLE:
    xm.mark_step()

if output_type == "latent":
    output = latents
else:
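For reference, an availability probe like `is_torch_xla_available` amounts to checking whether the package is installed without importing it. A hedged sketch (the real helper lives in `diffusers.utils`; `torch_xla_installed` is a hypothetical stand-in):

```py
# Hedged sketch of the availability probe; torch_xla_installed is a
# hypothetical stand-in for diffusers' is_torch_xla_available.
import importlib.util


def torch_xla_installed() -> bool:
    # find_spec returns None when the package is absent, without importing it
    return importlib.util.find_spec("torch_xla") is not None
```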
13 changes: 12 additions & 1 deletion src/diffusers/pipelines/amused/pipeline_amused_img2img.py
@@ -20,10 +20,18 @@
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...models import UVit2DModel, VQModel
from ...schedulers import AmusedScheduler
-from ...utils import replace_example_docstring
+from ...utils import is_torch_xla_available, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -325,6 +333,9 @@ def __call__(
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, timestep, latents)

if XLA_AVAILABLE:
    xm.mark_step()

if output_type == "latent":
    output = latents
else:
13 changes: 12 additions & 1 deletion src/diffusers/pipelines/amused/pipeline_amused_inpaint.py
@@ -21,10 +21,18 @@
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...models import UVit2DModel, VQModel
from ...schedulers import AmusedScheduler
-from ...utils import replace_example_docstring
+from ...utils import is_torch_xla_available, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -356,6 +364,9 @@ def __call__(
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, timestep, latents)

if XLA_AVAILABLE:
    xm.mark_step()

if output_type == "latent":
    output = latents
else:
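The `callback(step_idx, timestep, latents)` call preserved in these hunks is the legacy per-step hook, and the new `xm.mark_step()` sits right after it. A hedged usage sketch of that hook (the checkpoint name, prompt, and callback body are illustrative):

```py
# Hedged usage sketch of the legacy per-step callback seen in these hunks;
# checkpoint and prompt are illustrative.
import torch
from diffusers import AmusedPipeline


def on_step(step_idx: int, timestep: torch.Tensor, latents: torch.Tensor) -> None:
    print(f"step {step_idx}: latents {tuple(latents.shape)}")


pipe = AmusedPipeline.from_pretrained("amused/amused-256")
image = pipe("a tabby cat", callback=on_step, callback_steps=1).images[0]
```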
12 changes: 12 additions & 0 deletions src/diffusers/pipelines/animatediff/pipeline_animatediff.py
@@ -34,6 +34,7 @@
from ...utils import (
    USE_PEFT_BACKEND,
    deprecate,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
@@ -47,8 +48,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -844,6 +853,9 @@ def __call__(
if callback is not None and i % callback_steps == 0:
    callback(i, t, latents)

if XLA_AVAILABLE:
    xm.mark_step()

# 9. Post processing
if output_type == "latent":
    video = latents
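The post-processing branch repeated above returns raw latents when `output_type="latent"`, skipping the VAE decode entirely. A hedged sketch of how that is consumed (pipeline call is illustrative; for AnimateDiff-style pipelines the undecoded latents come back in `.frames`, per the `video = latents` branch):

```py
# Hedged sketch: output_type="latent" skips VAE decoding, so the latents can
# be decoded later, or on a different device or dtype.
out = pipe("a rocket launch", num_frames=16, output_type="latent")
latents = out.frames  # undecoded latents, not pixel-space frames
```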
@@ -32,7 +32,7 @@
from ...models.lora import adjust_lora_scale_text_encoder
from ...models.unets.unet_motion_model import MotionAdapter
from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
+from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers
from ...utils.torch_utils import is_compiled_module, randn_tensor
from ...video_processor import VideoProcessor
from ..free_init_utils import FreeInitMixin
@@ -41,8 +41,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -1090,6 +1098,9 @@ def __call__(
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
    progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

# 9. Post processing
if output_type == "latent":
    video = latents
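These loops gate `progress_bar.update()` on `i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0)`. A hedged sketch of the arithmetic, assuming the diffusers convention `num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order` (all names here are stand-ins, not the pipeline's attributes):

```py
# Hedged sketch of the progress-bar gating: a scheduler of order k consumes k
# model calls per visible step, so the bar advances once every k iterations.
num_inference_steps = 5
scheduler_order = 2  # e.g. a second-order scheduler
timesteps = list(range(num_inference_steps * scheduler_order))[::-1]
num_warmup_steps = len(timesteps) - num_inference_steps * scheduler_order  # 0 here

for i, t in enumerate(timesteps):
    # ... denoising work would happen here ...
    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler_order == 0):
        print(f"progress_bar.update() after iteration {i}")
```

This prints five updates across ten iterations, one per completed second-order step.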
12 changes: 12 additions & 0 deletions src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py
@@ -48,6 +48,7 @@
)
from ...utils import (
    USE_PEFT_BACKEND,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
@@ -60,8 +61,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -1261,6 +1270,9 @@ def __call__(

progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

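The float32 comment above refers to the SDXL-family VAE upcast guard. A hedged, simplified sketch of that decode path (the real pipelines use an `upcast_vae()` helper; this standalone function is illustrative):

```py
# Hedged sketch of the fp16-VAE guard: SDXL-era VAEs can overflow in float16,
# so decoding runs in float32 and the original dtype is restored afterwards.
import torch


def decode_with_upcast(vae, latents):
    needs_upcasting = vae.dtype == torch.float16 and vae.config.force_upcast
    if needs_upcasting:
        vae.to(torch.float32)
        latents = latents.to(torch.float32)
    frames = vae.decode(latents / vae.config.scaling_factor).sample
    if needs_upcasting:
        vae.to(torch.float16)  # restore the original dtype
    return frames
```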
@@ -30,6 +30,7 @@
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    USE_PEFT_BACKEND,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
@@ -42,8 +43,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```python
@@ -994,6 +1003,9 @@ def __call__(
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
    progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

# 11. Post processing
if output_type == "latent":
    video = latents
@@ -31,7 +31,7 @@
    LMSDiscreteScheduler,
    PNDMScheduler,
)
-from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
+from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from ..free_init_utils import FreeInitMixin
@@ -40,8 +40,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -1037,6 +1045,9 @@ def __call__(
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
    progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

# 10. Post-processing
if output_type == "latent":
    video = latents
@@ -39,7 +39,7 @@
    LMSDiscreteScheduler,
    PNDMScheduler,
)
-from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
+from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers
from ...utils.torch_utils import is_compiled_module, randn_tensor
from ...video_processor import VideoProcessor
from ..free_init_utils import FreeInitMixin
@@ -48,8 +48,16 @@
from .pipeline_output import AnimateDiffPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -1325,6 +1333,9 @@ def __call__(
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
    progress_bar.update()

if XLA_AVAILABLE:
    xm.mark_step()

# 11. Post-processing
if output_type == "latent":
    video = latents
13 changes: 12 additions & 1 deletion src/diffusers/pipelines/audioldm/pipeline_audioldm.py
@@ -22,13 +22,21 @@

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import logging, replace_example_docstring
+from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -530,6 +538,9 @@ def __call__(
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)

if XLA_AVAILABLE:
    xm.mark_step()

# 8. Post-processing
mel_spectrogram = self.decode_latents(latents)

15 changes: 15 additions & 0 deletions src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py
@@ -48,8 +48,20 @@
if is_librosa_available():
    import librosa


from ...utils import is_torch_xla_available


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__) # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
Examples:
```py
@@ -1033,6 +1045,9 @@ def __call__(
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)

if XLA_AVAILABLE:
    xm.mark_step()

self.maybe_free_model_hooks()

# 8. Post-processing
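With the `mark_step()` calls in place, the patched pipelines can be driven on an XLA device in the usual way. A hedged usage sketch (checkpoint and prompt are illustrative, and `xm.xla_device()` assumes a TPU/XLA runtime is present):

```py
# Hedged usage sketch: running a patched pipeline on an XLA device. The
# per-step xm.mark_step() keeps each denoising step as its own compiled
# graph instead of tracing the whole loop at once.
import torch_xla.core.xla_model as xm
from diffusers import AudioLDM2Pipeline

device = xm.xla_device()  # e.g. a TPU core
pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2").to(device)
audio = pipe("soft rain on a tin roof", num_inference_steps=50).audios[0]
```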