Commit 68da86a

make style and make quality
1 parent 2f549ee commit 68da86a

File tree

3 files changed: +10 -10 lines changed


src/diffusers/image_processor.py

Lines changed: 1 addition & 1 deletion
@@ -1471,4 +1471,4 @@ def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: in
     end_y = start_y + new_height
     samples = samples[:, :, start_y:end_y, start_x:end_x]
 
-    return samples
+    return samples
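The removed and added `return samples` lines above differ only in formatting (a `make style` fix). For context, the surrounding lines crop the tensor to the target size by slicing its last two dimensions; below is a minimal sketch of that center-crop pattern, illustrative only and not the full `resize_and_crop_tensor` helper (`center_crop_tensor` is a hypothetical name):

```python
import torch


def center_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor:
    # samples is assumed to have shape (batch, channels, height, width).
    start_y = (samples.shape[2] - new_height) // 2
    start_x = (samples.shape[3] - new_width) // 2
    end_y = start_y + new_height
    end_x = start_x + new_width
    # Slice the spatial dimensions to the requested window, as in the hunk above.
    return samples[:, :, start_y:end_y, start_x:end_x]
```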

src/diffusers/pipelines/wan/image_processor.py

Lines changed: 2 additions & 2 deletions
@@ -43,8 +43,8 @@ class WanAnimateImageProcessor(VaeImageProcessor):
             Whether to convert the images to grayscale format.
         fill_color (`str` or `float` or `Tuple[float, ...]`, *optional*, defaults to `None`):
             An optional fill color when `resize_mode` is set to `"fill"`. This will fill the empty space with that
-            color instead of filling with data from the image. Any valid `color` argument to `PIL.Image.new` is
-            valid; if `None`, will default to filling with data from `image`.
+            color instead of filling with data from the image. Any valid `color` argument to `PIL.Image.new` is valid;
+            if `None`, will default to filling with data from `image`.
     """
 
     @register_to_config
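For context on the `fill_color` behavior documented above: with `resize_mode="fill"`, any space the resized image does not cover is padded either with data from the image or, when `fill_color` is given, with a flat color accepted by `PIL.Image.new`. A minimal conceptual sketch of that padding step, not the class's actual implementation (`fill_to_canvas` is a hypothetical helper):

```python
from PIL import Image


def fill_to_canvas(image: Image.Image, width: int, height: int, fill_color="black") -> Image.Image:
    # Fit the image inside the target box while preserving its aspect ratio.
    scale = min(width / image.width, height / image.height)
    resized = image.resize((round(image.width * scale), round(image.height * scale)))

    # Any valid `color` argument to `PIL.Image.new` works here (name, int, or tuple).
    canvas = Image.new("RGB", (width, height), fill_color)

    # Center the resized image; the uncovered border keeps the flat fill color.
    offset = ((width - resized.width) // 2, (height - resized.height) // 2)
    canvas.paste(resized, offset)
    return canvas
```

With `fill_color=None`, the docstring says the processor instead fills the empty space with data from `image` itself.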

src/diffusers/pipelines/wan/pipeline_wan_animate.py

Lines changed: 7 additions & 7 deletions
@@ -23,7 +23,7 @@
 from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel
 
 from ...callbacks import MultiPipelineCallbacks, PipelineCallback
-from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...image_processor import PipelineImageInput
 from ...loaders import WanLoraLoaderMixin
 from ...models import AutoencoderKLWan, WanAnimateTransformer3DModel
 from ...schedulers import UniPCMultistepScheduler
@@ -978,9 +978,9 @@ def __call__(
         image_height, image_width = self.video_processor.get_default_height_width(image)
         if image_height != height or image_width != width:
             logger.warning(f"Reshaping reference image from ({image_width}, {image_height}) to ({width}, {height})")
-        image_pixels = self.vae_image_processor.preprocess(
-            image, height=height, width=width, resize_mode="fill"
-        ).to(device, dtype=torch.float32)
+        image_pixels = self.vae_image_processor.preprocess(image, height=height, width=width, resize_mode="fill").to(
+            device, dtype=torch.float32
+        )
 
         # Get CLIP features from the reference image
         if image_embeds is None:
@@ -1174,9 +1174,9 @@ def __call__(
             .view(1, self.vae.config.z_dim, 1, 1, 1)
             .to(latents.device, latents.dtype)
         )
-        latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
-            latents.device, latents.dtype
-        )
+        latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(
+            1, self.vae.config.z_dim, 1, 1, 1
+        ).to(latents.device, latents.dtype)
         latents = latents / latents_recip_std + latents_mean
         # Skip the first latent frame (used for conditioning)
         out_frames = self.vae.decode(latents[:, :, 1:], return_dict=False)[0]
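The final hunk only rewraps the `latents_recip_std` expression; the de-normalization itself is unchanged. Because `latents_recip_std` is `1.0 / latents_std`, dividing by it multiplies by the std, so `latents / latents_recip_std + latents_mean` equals `latents * std + mean`. A small standalone sketch of that equivalence (the `z_dim` and tensor shapes here are made up for illustration):

```python
import torch

z_dim = 16  # hypothetical latent channel count
latents = torch.randn(2, z_dim, 4, 8, 8)  # (batch, channels, frames, height, width)
latents_mean = torch.randn(z_dim).view(1, z_dim, 1, 1, 1)
latents_std = (torch.rand(z_dim) + 0.5).view(1, z_dim, 1, 1, 1)

# As written in the pipeline: divide by the reciprocal std, then add the mean.
latents_recip_std = 1.0 / latents_std
denorm_via_reciprocal = latents / latents_recip_std + latents_mean

# Equivalent direct form.
denorm_direct = latents * latents_std + latents_mean

assert torch.allclose(denorm_via_reciprocal, denorm_direct)
```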
