
Commit 1cb8ec9

reformat code
1 parent f1bb604 commit 1cb8ec9

File tree

3 files changed: 8 additions, 4 deletions


src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py

Lines changed: 3 additions & 1 deletion

@@ -42,6 +42,7 @@
     ```
 """
 
+
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
@@ -55,6 +56,7 @@ def retrieve_latents(
     else:
         raise AttributeError("Could not access latents of provided encoder_output")
 
+
 # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
 def calculate_shift(
     image_seq_len,
@@ -469,7 +471,7 @@ def prepare_latents(
 
         image = image.to(device=device, dtype=dtype)
         if image.shape[1] != self.latent_channels:
-            image_latents = self._encode_vae_image(image=image, generator=generator) # [B,z,1,H',W']
+            image_latents = self._encode_vae_image(image=image, generator=generator)  # [B,z,1,H',W']
         else:
             image_latents = image
         if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:

src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py

Lines changed: 3 additions & 3 deletions

@@ -45,6 +45,7 @@
     ```
 """
 
+
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
@@ -58,6 +59,7 @@ def retrieve_latents(
     else:
         raise AttributeError("Could not access latents of provided encoder_output")
 
+
 # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
 def calculate_shift(
     image_seq_len,
@@ -490,14 +492,13 @@ def prepare_latents(
         elif image.dim() != 5:
             raise ValueError(f"Expected image dims 4 or 5, got {image.dim()}.")
 
-
         if latents is not None:
             latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
             return latents.to(device=device, dtype=dtype), latent_image_ids
 
         image = image.to(device=device, dtype=dtype)
         if image.shape[1] != self.latent_channels:
-            image_latents = self._encode_vae_image(image=image, generator=generator) # [B,z,1,H',W']
+            image_latents = self._encode_vae_image(image=image, generator=generator)  # [B,z,1,H',W']
         else:
             image_latents = image
         if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
@@ -971,7 +972,6 @@ def __call__(
 
                 latents = (1 - init_mask) * init_latents_proper + init_mask * latents
 
-
                 if latents.dtype != latents_dtype:
                     if torch.backends.mps.is_available():
                         # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272

src/diffusers/utils/dummy_torch_and_transformers_objects.py

Lines changed: 2 additions & 0 deletions

@@ -1756,6 +1756,7 @@ def from_config(cls, *args, **kwargs):
     def from_pretrained(cls, *args, **kwargs):
         requires_backends(cls, ["torch", "transformers"])
 
+
 class QwenImageImg2ImgPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers"]
 
@@ -1770,6 +1771,7 @@ def from_config(cls, *args, **kwargs):
     def from_pretrained(cls, *args, **kwargs):
         requires_backends(cls, ["torch", "transformers"])
 
+
 class QwenImageInpaintPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers"]
 
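Taken together, the edits are purely stylistic: two blank lines are restored before module-level functions and classes, stray extra blank lines inside function bodies are dropped, and the changed `_encode_vae_image` lines appear to adjust inline-comment spacing. A minimal sketch of the conventions being enforced (PEP 8 / black style) is below; the module, function, and class names are illustrative only and are not taken from the diff.

```python
import torch


# Two blank lines separate module-level definitions (PEP 8 / black style).
def encode(image: torch.Tensor) -> torch.Tensor:
    # Stand-in for VAE encoding; the real pipelines call self._encode_vae_image.
    return image * 0.5


class ExamplePipeline:
    latent_channels = 16

    def prepare(self, image: torch.Tensor) -> torch.Tensor:
        if image.shape[1] != self.latent_channels:
            return encode(image)  # two spaces before an inline comment
        return image


if __name__ == "__main__":
    latents = ExamplePipeline().prepare(torch.randn(1, 3, 8, 8))
    print(latents.shape)  # torch.Size([1, 3, 8, 8]), a scaled copy of the input
```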