Skip to content

Commit be861e2

Browse files
committed
update
1 parent 2d744f0 commit be861e2

File tree

8 files changed

+42
-38
lines changed

8 files changed

+42
-38
lines changed

examples/community/mod_controlnet_tile_sr_sdxl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -701,7 +701,7 @@ def check_inputs(
701701
raise ValueError("`max_tile_size` cannot be None.")
702702
elif not isinstance(max_tile_size, int) or max_tile_size not in (1024, 1280):
703703
raise ValueError(
704-
f"`max_tile_size` has to be in 1024 or 1280 but is {max_tile_size} of type" f" {type(max_tile_size)}."
704+
f"`max_tile_size` has to be in 1024 or 1280 but is {max_tile_size} of type {type(max_tile_size)}."
705705
)
706706
if tile_gaussian_sigma is None:
707707
raise ValueError("`tile_gaussian_sigma` cannot be None.")

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@
122122
"pytest-timeout",
123123
"pytest-xdist",
124124
"python>=3.8.0",
125-
"ruff==0.9.7",
125+
"ruff==0.9.10",
126126
"safetensors>=0.3.1",
127127
"sentencepiece>=0.1.91,!=0.1.92",
128128
"GitPython<3.1.19",

src/diffusers/dependency_versions_table.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
"pytest-timeout": "pytest-timeout",
3030
"pytest-xdist": "pytest-xdist",
3131
"python": "python>=3.8.0",
32-
"ruff": "ruff==0.9.7",
32+
"ruff": "ruff==0.9.10",
3333
"safetensors": "safetensors>=0.3.1",
3434
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
3535
"GitPython": "GitPython<3.1.19",

src/diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1130,7 +1130,7 @@ def __call__(
11301130
f"Incorrect configuration settings! The config of `pipeline.transformer`: {self.transformer.config} expects"
11311131
f" {self.transformer.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
11321132
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1133-
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1133+
f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
11341134
" `pipeline.transformer` or your `mask_image` or `image` input."
11351135
)
11361136

src/diffusers/pipelines/wan/pipeline_wan_i2v.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ def check_inputs(
318318
callback_on_step_end_tensor_inputs=None,
319319
):
320320
if not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image):
321-
raise ValueError("`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is" f" {type(image)}")
321+
raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}")
322322
if height % 16 != 0 or width % 16 != 0:
323323
raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")
324324

tests/pipelines/hunyuandit/test_hunyuan_dit.py

Lines changed: 28 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -21,19 +21,23 @@
2121
import torch
2222
from transformers import AutoTokenizer, BertModel, T5EncoderModel
2323

24-
from diffusers import (AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel,
25-
HunyuanDiTPipeline)
26-
from diffusers.utils.testing_utils import (enable_full_determinism,
27-
numpy_cosine_similarity_distance,
28-
require_torch_accelerator, slow,
29-
torch_device)
30-
31-
from ..pipeline_params import (TEXT_TO_IMAGE_BATCH_PARAMS,
32-
TEXT_TO_IMAGE_IMAGE_PARAMS,
33-
TEXT_TO_IMAGE_PARAMS)
24+
from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline
25+
from diffusers.utils.testing_utils import (
26+
enable_full_determinism,
27+
numpy_cosine_similarity_distance,
28+
require_torch_accelerator,
29+
slow,
30+
torch_device,
31+
)
32+
33+
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
3434
from ..test_pipelines_common import (
35-
PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length,
36-
check_qkv_fusion_processors_exist, to_np)
35+
PipelineTesterMixin,
36+
check_qkv_fusion_matches_attn_procs_length,
37+
check_qkv_fusion_processors_exist,
38+
to_np,
39+
)
40+
3741

3842
enable_full_determinism()
3943

@@ -170,9 +174,9 @@ def test_fused_qkv_projections(self):
170174
# TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added
171175
# to the pipeline level.
172176
pipe.transformer.fuse_qkv_projections()
173-
assert check_qkv_fusion_processors_exist(
174-
pipe.transformer
175-
), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
177+
assert check_qkv_fusion_processors_exist(pipe.transformer), (
178+
"Something wrong with the fused attention processors. Expected all the attention processors to be fused."
179+
)
176180
assert check_qkv_fusion_matches_attn_procs_length(
177181
pipe.transformer, pipe.transformer.original_attn_processors
178182
), "Something wrong with the attention processors concerning the fused QKV projections."
@@ -188,15 +192,15 @@ def test_fused_qkv_projections(self):
188192
image_disabled = pipe(**inputs)[0]
189193
image_slice_disabled = image_disabled[0, -3:, -3:, -1]
190194

191-
assert np.allclose(
192-
original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
193-
), "Fusion of QKV projections shouldn't affect the outputs."
194-
assert np.allclose(
195-
image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
196-
), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
197-
assert np.allclose(
198-
original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
199-
), "Original outputs should match when fused QKV projections are disabled."
195+
assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
196+
"Fusion of QKV projections shouldn't affect the outputs."
197+
)
198+
assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
199+
"Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
200+
)
201+
assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
202+
"Original outputs should match when fused QKV projections are disabled."
203+
)
200204

201205
@unittest.skip(
202206
"Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."

tests/single_file/test_model_wan_autoencoder_single_file.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,6 @@ def test_single_file_components(self):
5656
for param_name, param_value in model_single_file.config.items():
5757
if param_name in PARAMS_TO_IGNORE:
5858
continue
59-
assert (
60-
model.config[param_name] == param_value
61-
), f"{param_name} differs between single file loading and pretrained loading"
59+
assert model.config[param_name] == param_value, (
60+
f"{param_name} differs between single file loading and pretrained loading"
61+
)

tests/single_file/test_model_wan_transformer3d_single_file.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -57,9 +57,9 @@ def test_single_file_components(self):
5757
for param_name, param_value in model_single_file.config.items():
5858
if param_name in PARAMS_TO_IGNORE:
5959
continue
60-
assert (
61-
model.config[param_name] == param_value
62-
), f"{param_name} differs between single file loading and pretrained loading"
60+
assert model.config[param_name] == param_value, (
61+
f"{param_name} differs between single file loading and pretrained loading"
62+
)
6363

6464

6565
@require_big_gpu_with_torch_cuda
@@ -88,6 +88,6 @@ def test_single_file_components(self):
8888
for param_name, param_value in model_single_file.config.items():
8989
if param_name in PARAMS_TO_IGNORE:
9090
continue
91-
assert (
92-
model.config[param_name] == param_value
93-
), f"{param_name} differs between single file loading and pretrained loading"
91+
assert model.config[param_name] == param_value, (
92+
f"{param_name} differs between single file loading and pretrained loading"
93+
)

0 commit comments

Comments
 (0)