From d67234be0a4993dfaada65144d25069696c2cad7 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Apr 2025 17:17:32 +0530 Subject: [PATCH 1/3] fix timeout constant --- utils/fetch_latest_release_branch.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/fetch_latest_release_branch.py b/utils/fetch_latest_release_branch.py index f0602d5b29a8..ba5ab70846a6 100644 --- a/utils/fetch_latest_release_branch.py +++ b/utils/fetch_latest_release_branch.py @@ -17,9 +17,6 @@ import requests from packaging.version import parse -from ..src.diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT - - # GitHub repository details USER = "huggingface" REPO = "diffusers" @@ -33,7 +30,7 @@ def fetch_all_branches(user, repo): response = requests.get( f"https://api.github.com/repos/{user}/{repo}/branches", params={"page": page}, - timeout=DIFFUSERS_REQUEST_TIMEOUT, + timeout=60, ) # Check if the request was successful From 3ae0d67e45efca3b38ba748fd217ae264b910803 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Apr 2025 17:19:43 +0530 Subject: [PATCH 2/3] style --- .../train_dreambooth_lora_flux_advanced.py | 6 +- .../train_dreambooth_lora_sd15_advanced.py | 12 ++-- .../train_dreambooth_lora_sdxl_advanced.py | 12 ++-- examples/community/pipeline_prompt2prompt.py | 12 ++-- .../train_custom_diffusion.py | 24 +++---- .../flux-control/train_control_lora_flux.py | 6 +- examples/model_search/pipeline_easy.py | 6 +- examples/research_projects/anytext/anytext.py | 6 +- .../anytext/ocr_recog/RecSVTR.py | 6 +- .../train_multi_subject_dreambooth.py | 12 ++-- .../textual_inversion.py | 6 +- .../textual_inversion/textual_inversion.py | 6 +- .../textual_inversion/textual_inversion.py | 6 +- .../textual_inversion_sdxl.py | 12 ++-- examples/vqgan/train_vqgan.py | 12 ++-- .../convert_dance_diffusion_to_diffusers.py | 12 ++-- ...vert_hunyuandit_controlnet_to_diffusers.py | 6 +- scripts/convert_hunyuandit_to_diffusers.py | 6 +- scripts/convert_k_upscaler_to_diffusers.py | 6 +- scripts/convert_mochi_to_diffusers.py | 12 ++-- scripts/convert_svd_to_diffusers.py | 12 ++-- scripts/convert_vq_diffusion_to_diffusers.py | 24 +++---- .../loaders/lora_conversion_utils.py | 66 ++++++++--------- .../models/transformers/transformer_2d.py | 6 +- src/diffusers/pipelines/free_noise_utils.py | 6 +- .../pipelines/omnigen/processor_omnigen.py | 12 ++-- src/diffusers/pipelines/shap_e/renderer.py | 12 ++-- src/diffusers/quantizers/base.py | 12 ++-- tests/models/test_modeling_common.py | 12 ++-- .../test_models_transformer_sd3.py | 12 ++-- .../unets/test_models_unet_2d_condition.py | 36 +++++----- tests/others/test_image_processor.py | 30 ++++---- tests/pipelines/amused/test_amused.py | 3 +- tests/pipelines/amused/test_amused_img2img.py | 3 +- tests/pipelines/amused/test_amused_inpaint.py | 3 +- .../aura_flow/test_pipeline_aura_flow.py | 24 +++---- .../blipdiffusion/test_blipdiffusion.py | 6 +- tests/pipelines/cogvideo/test_cogvideox.py | 24 +++---- .../cogvideo/test_cogvideox_fun_control.py | 24 +++---- .../cogvideo/test_cogvideox_image2video.py | 24 +++---- .../cogvideo/test_cogvideox_video2video.py | 24 +++---- .../test_controlnet_blip_diffusion.py | 6 +- .../controlnet_flux/test_controlnet_flux.py | 6 +- .../test_controlnet_flux_img2img.py | 24 +++---- .../test_controlnet_hunyuandit.py | 6 +- .../test_controlnet_inpaint_sd3.py | 6 +- .../controlnet_sd3/test_controlnet_sd3.py | 6 +- tests/pipelines/flux/test_pipeline_flux.py | 24 +++---- .../flux/test_pipeline_flux_control.py | 24 +++---- 
.../test_pipeline_flux_control_inpaint.py | 24 +++---- .../pipelines/hunyuandit/test_hunyuan_dit.py | 24 +++---- tests/pipelines/kandinsky/test_kandinsky.py | 12 ++-- .../kandinsky/test_kandinsky_combined.py | 36 +++++----- .../kandinsky/test_kandinsky_img2img.py | 12 ++-- .../kandinsky/test_kandinsky_inpaint.py | 12 ++-- .../pipelines/kandinsky2_2/test_kandinsky.py | 12 ++-- .../kandinsky2_2/test_kandinsky_combined.py | 36 +++++----- .../kandinsky2_2/test_kandinsky_controlnet.py | 12 ++-- .../test_kandinsky_controlnet_img2img.py | 12 ++-- .../kandinsky2_2/test_kandinsky_img2img.py | 12 ++-- .../kandinsky2_2/test_kandinsky_inpaint.py | 12 ++-- tests/pipelines/kandinsky3/test_kandinsky3.py | 6 +- .../kandinsky3/test_kandinsky3_img2img.py | 6 +- tests/pipelines/pag/test_pag_animatediff.py | 6 +- tests/pipelines/pag/test_pag_controlnet_sd.py | 6 +- .../pag/test_pag_controlnet_sd_inpaint.py | 6 +- .../pipelines/pag/test_pag_controlnet_sdxl.py | 6 +- .../pag/test_pag_controlnet_sdxl_img2img.py | 6 +- tests/pipelines/pag/test_pag_hunyuan_dit.py | 24 +++---- tests/pipelines/pag/test_pag_kolors.py | 6 +- tests/pipelines/pag/test_pag_pixart_sigma.py | 6 +- tests/pipelines/pag/test_pag_sana.py | 6 +- tests/pipelines/pag/test_pag_sd.py | 18 ++--- tests/pipelines/pag/test_pag_sd3.py | 30 ++++---- tests/pipelines/pag/test_pag_sd3_img2img.py | 18 ++--- tests/pipelines/pag/test_pag_sd_img2img.py | 18 ++--- tests/pipelines/pag/test_pag_sd_inpaint.py | 12 ++-- tests/pipelines/pag/test_pag_sdxl.py | 18 ++--- tests/pipelines/pag/test_pag_sdxl_img2img.py | 18 ++--- tests/pipelines/pag/test_pag_sdxl_inpaint.py | 18 ++--- tests/pipelines/pixart_sigma/test_pixart.py | 24 +++---- .../test_stable_cascade_combined.py | 12 ++-- .../stable_diffusion/test_stable_diffusion.py | 48 ++++++------- .../test_pipeline_stable_diffusion_3.py | 24 +++---- .../test_stable_diffusion_xl.py | 30 ++++---- .../test_stable_diffusion_xl_inpaint.py | 12 ++-- tests/pipelines/test_pipelines.py | 24 +++---- tests/pipelines/test_pipelines_common.py | 72 +++++++++---------- .../wuerstchen/test_wuerstchen_combined.py | 12 ++-- tests/schedulers/test_scheduler_dpm_multi.py | 6 +- tests/schedulers/test_scheduler_dpm_single.py | 6 +- .../test_scheduler_edm_dpmsolver_multistep.py | 6 +- tests/schedulers/test_scheduler_euler.py | 12 ++-- tests/schedulers/test_scheduler_heun.py | 6 +- .../single_file/single_file_testing_utils.py | 24 +++---- tests/single_file/test_lumina2_transformer.py | 6 +- .../test_model_autoencoder_dc_single_file.py | 18 ++--- .../test_model_controlnet_single_file.py | 6 +- ...test_model_flux_transformer_single_file.py | 6 +- .../test_model_motion_adapter_single_file.py | 24 +++---- .../test_model_sd_cascade_unet_single_file.py | 24 +++---- .../single_file/test_model_vae_single_file.py | 6 +- .../test_model_wan_autoencoder_single_file.py | 6 +- ...est_model_wan_transformer3d_single_file.py | 12 ++-- tests/single_file/test_sana_transformer.py | 6 +- utils/fetch_latest_release_branch.py | 1 + 106 files changed, 777 insertions(+), 769 deletions(-) diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py index dc774d145c83..82075280c329 100644 --- a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py +++ b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py @@ -839,9 +839,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]): idx = 0 for tokenizer, text_encoder in 
zip(self.tokenizers, self.text_encoders): assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings." - assert all(isinstance(tok, str) for tok in inserting_toks), ( - "All elements in inserting_toks should be strings." - ) + assert all( + isinstance(tok, str) for tok in inserting_toks + ), "All elements in inserting_toks should be strings." self.inserting_toks = inserting_toks special_tokens_dict = {"additional_special_tokens": self.inserting_toks} diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py index 95ba53391cf3..32df4c4e16a8 100644 --- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py +++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py @@ -725,9 +725,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]): idx = 0 for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders): assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings." - assert all(isinstance(tok, str) for tok in inserting_toks), ( - "All elements in inserting_toks should be strings." - ) + assert all( + isinstance(tok, str) for tok in inserting_toks + ), "All elements in inserting_toks should be strings." self.inserting_toks = inserting_toks special_tokens_dict = {"additional_special_tokens": self.inserting_toks} @@ -747,9 +747,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]): .to(dtype=self.dtype) * std_token_embedding ) - self.embeddings_settings[f"original_embeddings_{idx}"] = ( - text_encoder.text_model.embeddings.token_embedding.weight.data.clone() - ) + self.embeddings_settings[ + f"original_embeddings_{idx}" + ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone() self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding inu = torch.ones((len(tokenizer),), dtype=torch.bool) diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py index 236dc20d621c..f1a7f0224011 100644 --- a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py +++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py @@ -890,9 +890,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]): idx = 0 for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders): assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings." - assert all(isinstance(tok, str) for tok in inserting_toks), ( - "All elements in inserting_toks should be strings." - ) + assert all( + isinstance(tok, str) for tok in inserting_toks + ), "All elements in inserting_toks should be strings." 
self.inserting_toks = inserting_toks special_tokens_dict = {"additional_special_tokens": self.inserting_toks} @@ -912,9 +912,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]): .to(dtype=self.dtype) * std_token_embedding ) - self.embeddings_settings[f"original_embeddings_{idx}"] = ( - text_encoder.text_model.embeddings.token_embedding.weight.data.clone() - ) + self.embeddings_settings[ + f"original_embeddings_{idx}" + ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone() self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding inu = torch.ones((len(tokenizer),), dtype=torch.bool) diff --git a/examples/community/pipeline_prompt2prompt.py b/examples/community/pipeline_prompt2prompt.py index b9985542ccf7..736f00799eae 100644 --- a/examples/community/pipeline_prompt2prompt.py +++ b/examples/community/pipeline_prompt2prompt.py @@ -907,12 +907,12 @@ def create_controller( # reweight if edit_type == "reweight": - assert equalizer_words is not None and equalizer_strengths is not None, ( - "To use reweight edit, please specify equalizer_words and equalizer_strengths." - ) - assert len(equalizer_words) == len(equalizer_strengths), ( - "equalizer_words and equalizer_strengths must be of same length." - ) + assert ( + equalizer_words is not None and equalizer_strengths is not None + ), "To use reweight edit, please specify equalizer_words and equalizer_strengths." + assert len(equalizer_words) == len( + equalizer_strengths + ), "equalizer_words and equalizer_strengths must be of same length." equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer) return AttentionReweight( prompts, diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py index fa2959cf41a1..faefd980ee52 100644 --- a/examples/custom_diffusion/train_custom_diffusion.py +++ b/examples/custom_diffusion/train_custom_diffusion.py @@ -731,18 +731,18 @@ def main(args): if not class_images_dir.exists(): class_images_dir.mkdir(parents=True, exist_ok=True) if args.real_prior: - assert (class_images_dir / "images").exists(), ( - f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' - ) - assert len(list((class_images_dir / "images").iterdir())) == args.num_class_images, ( - f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' - ) - assert (class_images_dir / "caption.txt").exists(), ( - f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' - ) - assert (class_images_dir / "images.txt").exists(), ( - f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' - ) + assert ( + class_images_dir / "images" + ).exists(), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' + assert ( + len(list((class_images_dir / "images").iterdir())) == args.num_class_images + ), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' + assert ( + class_images_dir / "caption.txt" + ).exists(), f'Please run: python 
retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' + assert ( + class_images_dir / "images.txt" + ).exists(), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}' concept["class_prompt"] = os.path.join(class_images_dir, "caption.txt") concept["class_data_dir"] = os.path.join(class_images_dir, "images.txt") args.concepts_list[i] = concept diff --git a/examples/flux-control/train_control_lora_flux.py b/examples/flux-control/train_control_lora_flux.py index 2a9bfd949cde..269e1b4477a0 100644 --- a/examples/flux-control/train_control_lora_flux.py +++ b/examples/flux-control/train_control_lora_flux.py @@ -91,9 +91,9 @@ def log_validation(flux_transformer, args, accelerator, weight_dtype, step, is_f torch_dtype=weight_dtype, ) pipeline.load_lora_weights(args.output_dir) - assert pipeline.transformer.config.in_channels == initial_channels * 2, ( - f"{pipeline.transformer.config.in_channels=}" - ) + assert ( + pipeline.transformer.config.in_channels == initial_channels * 2 + ), f"{pipeline.transformer.config.in_channels=}" pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) diff --git a/examples/model_search/pipeline_easy.py b/examples/model_search/pipeline_easy.py index b82e98fb71ff..a8add8311006 100644 --- a/examples/model_search/pipeline_easy.py +++ b/examples/model_search/pipeline_easy.py @@ -1081,9 +1081,9 @@ def auto_load_textual_inversion( f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}" ) - pretrained_model_name_or_paths[pretrained_model_name_or_paths.index(search_word)] = ( - textual_inversion_path.model_path - ) + pretrained_model_name_or_paths[ + pretrained_model_name_or_paths.index(search_word) + ] = textual_inversion_path.model_path self.load_textual_inversion( pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs diff --git a/examples/research_projects/anytext/anytext.py b/examples/research_projects/anytext/anytext.py index 2e96014c4193..5c30b24efe88 100644 --- a/examples/research_projects/anytext/anytext.py +++ b/examples/research_projects/anytext/anytext.py @@ -187,9 +187,9 @@ def get_clip_token_for_string(tokenizer, string): return_tensors="pt", ) tokens = batch_encoding["input_ids"] - assert torch.count_nonzero(tokens - 49407) == 2, ( - f"String '{string}' maps to more than a single token. Please use another string" - ) + assert ( + torch.count_nonzero(tokens - 49407) == 2 + ), f"String '{string}' maps to more than a single token. Please use another string" return tokens[0, 1] diff --git a/examples/research_projects/anytext/ocr_recog/RecSVTR.py b/examples/research_projects/anytext/ocr_recog/RecSVTR.py index 3dc813b84a55..590a96995b26 100644 --- a/examples/research_projects/anytext/ocr_recog/RecSVTR.py +++ b/examples/research_projects/anytext/ocr_recog/RecSVTR.py @@ -312,9 +312,9 @@ def __init__(self, img_size=(32, 100), in_channels=3, embed_dim=768, sub_num=2): def forward(self, x): B, C, H, W = x.shape - assert H == self.img_size[0] and W == self.img_size[1], ( - f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." - ) + assert ( + H == self.img_size[0] and W == self.img_size[1] + ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
x = self.proj(x).flatten(2).permute(0, 2, 1) return x diff --git a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py index 57c555e43fd8..0f507b26d6a8 100644 --- a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py +++ b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py @@ -763,9 +763,9 @@ def main(args): # Parse instance and class inputs, and double check that lengths match instance_data_dir = args.instance_data_dir.split(",") instance_prompt = args.instance_prompt.split(",") - assert all(x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)]), ( - "Instance data dir and prompt inputs are not of the same length." - ) + assert all( + x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)] + ), "Instance data dir and prompt inputs are not of the same length." if args.with_prior_preservation: class_data_dir = args.class_data_dir.split(",") @@ -788,9 +788,9 @@ def main(args): negative_validation_prompts.append(None) args.validation_negative_prompt = negative_validation_prompts - assert num_of_validation_prompts == len(negative_validation_prompts), ( - "The length of negative prompts for validation is greater than the number of validation prompts." - ) + assert num_of_validation_prompts == len( + negative_validation_prompts + ), "The length of negative prompts for validation is greater than the number of validation prompts." args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts diff --git a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py index 75dcfccbd5b8..19432142f541 100644 --- a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py +++ b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py @@ -830,9 +830,9 @@ def main(): # Let's make sure we don't update any embedding weights besides the newly added token index_no_updates = get_mask(tokenizer, accelerator) with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = ( - orig_embeds_params[index_no_updates] - ) + accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ + index_no_updates + ] = orig_embeds_params[index_no_updates] # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py index a881b06a94dc..7f5dc8ece9fc 100644 --- a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py +++ b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py @@ -886,9 +886,9 @@ def main(): index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = ( - orig_embeds_params[index_no_updates] - ) + accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ + index_no_updates + ] = orig_embeds_params[index_no_updates] # Checks if the accelerator 
has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index 51e220828cdf..757a12045f10 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -910,9 +910,9 @@ def main(): index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = ( - orig_embeds_params[index_no_updates] - ) + accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ + index_no_updates + ] = orig_embeds_params[index_no_updates] # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/textual_inversion/textual_inversion_sdxl.py b/examples/textual_inversion/textual_inversion_sdxl.py index f32c729195b0..11463943c448 100644 --- a/examples/textual_inversion/textual_inversion_sdxl.py +++ b/examples/textual_inversion/textual_inversion_sdxl.py @@ -965,12 +965,12 @@ def main(): index_no_updates_2[min(placeholder_token_ids_2) : max(placeholder_token_ids_2) + 1] = False with torch.no_grad(): - accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[index_no_updates] = ( - orig_embeds_params[index_no_updates] - ) - accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[index_no_updates_2] = ( - orig_embeds_params_2[index_no_updates_2] - ) + accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[ + index_no_updates + ] = orig_embeds_params[index_no_updates] + accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[ + index_no_updates_2 + ] = orig_embeds_params_2[index_no_updates_2] # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/vqgan/train_vqgan.py b/examples/vqgan/train_vqgan.py index 33d234da52d7..992722fa7a78 100644 --- a/examples/vqgan/train_vqgan.py +++ b/examples/vqgan/train_vqgan.py @@ -653,15 +653,15 @@ def main(): try: # Gets the resolution of the timm transformation after centercrop timm_centercrop_transform = timm_transform.transforms[1] - assert isinstance(timm_centercrop_transform, transforms.CenterCrop), ( - f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." - ) + assert isinstance( + timm_centercrop_transform, transforms.CenterCrop + ), f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." timm_model_resolution = timm_centercrop_transform.size[0] # Gets final normalization timm_model_normalization = timm_transform.transforms[-1] - assert isinstance(timm_model_normalization, transforms.Normalize), ( - f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." - ) + assert isinstance( + timm_model_normalization, transforms.Normalize + ), f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." 
except AssertionError as e: raise NotImplementedError(e) # Enable flash attention if asked diff --git a/scripts/convert_dance_diffusion_to_diffusers.py b/scripts/convert_dance_diffusion_to_diffusers.py index e269a49070cc..f9caa50dfc9b 100755 --- a/scripts/convert_dance_diffusion_to_diffusers.py +++ b/scripts/convert_dance_diffusion_to_diffusers.py @@ -261,9 +261,9 @@ def main(args): model_name = args.model_path.split("/")[-1].split(".")[0] if not os.path.isfile(args.model_path): - assert model_name == args.model_path, ( - f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" - ) + assert ( + model_name == args.model_path + ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" args.model_path = download(model_name) sample_rate = MODELS_MAP[model_name]["sample_rate"] @@ -290,9 +290,9 @@ def main(args): assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}" for key, value in renamed_state_dict.items(): - assert diffusers_state_dict[key].squeeze().shape == value.squeeze().shape, ( - f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" - ) + assert ( + diffusers_state_dict[key].squeeze().shape == value.squeeze().shape + ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" if key == "time_proj.weight": value = value.squeeze() diff --git a/scripts/convert_hunyuandit_controlnet_to_diffusers.py b/scripts/convert_hunyuandit_controlnet_to_diffusers.py index 5cef46c98983..1c8383690890 100644 --- a/scripts/convert_hunyuandit_controlnet_to_diffusers.py +++ b/scripts/convert_hunyuandit_controlnet_to_diffusers.py @@ -21,9 +21,9 @@ def main(args): model_config = HunyuanDiT2DControlNetModel.load_config( "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer" ) - model_config["use_style_cond_and_image_meta_size"] = ( - args.use_style_cond_and_image_meta_size - ) ### version <= v1.1: True; version >= v1.2: False + model_config[ + "use_style_cond_and_image_meta_size" + ] = args.use_style_cond_and_image_meta_size ### version <= v1.1: True; version >= v1.2: False print(model_config) for key in state_dict: diff --git a/scripts/convert_hunyuandit_to_diffusers.py b/scripts/convert_hunyuandit_to_diffusers.py index 65fcccb22a1a..ef174098a815 100644 --- a/scripts/convert_hunyuandit_to_diffusers.py +++ b/scripts/convert_hunyuandit_to_diffusers.py @@ -18,9 +18,9 @@ def main(args): device = "cuda" model_config = HunyuanDiT2DModel.load_config("Tencent-Hunyuan/HunyuanDiT-Diffusers", subfolder="transformer") - model_config["use_style_cond_and_image_meta_size"] = ( - args.use_style_cond_and_image_meta_size - ) ### version <= v1.1: True; version >= v1.2: False + model_config[ + "use_style_cond_and_image_meta_size" + ] = args.use_style_cond_and_image_meta_size ### version <= v1.1: True; version >= v1.2: False # input_size -> sample_size, text_dim -> cross_attention_dim for key in state_dict: diff --git a/scripts/convert_k_upscaler_to_diffusers.py b/scripts/convert_k_upscaler_to_diffusers.py index cff845ef8099..76d374e565d7 100644 --- a/scripts/convert_k_upscaler_to_diffusers.py +++ b/scripts/convert_k_upscaler_to_diffusers.py @@ -220,9 +220,9 @@ def unet_model_from_original_config(original_config): block_out_channels = original_config["channels"] - assert len(set(original_config["depths"])) == 1, ( - "UNet2DConditionModel currently do not support blocks with different number of layers" - ) + assert ( + 
len(set(original_config["depths"])) == 1 + ), "UNet2DConditionModel currently do not support blocks with different number of layers" layers_per_block = original_config["depths"][0] class_labels_dim = original_config["mapping_cond_dim"] diff --git a/scripts/convert_mochi_to_diffusers.py b/scripts/convert_mochi_to_diffusers.py index 64e4f69eac17..642045853591 100644 --- a/scripts/convert_mochi_to_diffusers.py +++ b/scripts/convert_mochi_to_diffusers.py @@ -305,9 +305,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa for i in range(down_block_layers[block]): # Convert resnets - new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"] = ( - encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.0.weight") - ) + new_state_dict[ + f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight" + ] = encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.0.weight") new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.0.bias" ) @@ -317,9 +317,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.2.bias" ) - new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"] = ( - encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.3.weight") - ) + new_state_dict[ + f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight" + ] = encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.3.weight") new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.3.bias" ) diff --git a/scripts/convert_svd_to_diffusers.py b/scripts/convert_svd_to_diffusers.py index e46410ccb3bd..3243ce294b26 100644 --- a/scripts/convert_svd_to_diffusers.py +++ b/scripts/convert_svd_to_diffusers.py @@ -381,9 +381,9 @@ def convert_ldm_unet_checkpoint( # TODO resnet time_mixer.mix_factor if f"input_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: - new_checkpoint[f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = ( - unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"] - ) + new_checkpoint[ + f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" + ] = unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"] if len(attentions): paths = renew_attention_paths(attentions) @@ -478,9 +478,9 @@ def convert_ldm_unet_checkpoint( ) if f"output_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: - new_checkpoint[f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = ( - unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"] - ) + new_checkpoint[ + f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" + ] = unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"] output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): diff --git a/scripts/convert_vq_diffusion_to_diffusers.py b/scripts/convert_vq_diffusion_to_diffusers.py index fe62d18faff0..7da6b4094986 100644 --- a/scripts/convert_vq_diffusion_to_diffusers.py +++ b/scripts/convert_vq_diffusion_to_diffusers.py @@ -51,9 +51,9 @@ def vqvae_model_from_original_config(original_config): - assert 
original_config["target"] in PORTED_VQVAES, ( - f"{original_config['target']} has not yet been ported to diffusers." - ) + assert ( + original_config["target"] in PORTED_VQVAES + ), f"{original_config['target']} has not yet been ported to diffusers." original_config = original_config["params"] @@ -464,15 +464,15 @@ def vqvae_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_p def transformer_model_from_original_config( original_diffusion_config, original_transformer_config, original_content_embedding_config ): - assert original_diffusion_config["target"] in PORTED_DIFFUSIONS, ( - f"{original_diffusion_config['target']} has not yet been ported to diffusers." - ) - assert original_transformer_config["target"] in PORTED_TRANSFORMERS, ( - f"{original_transformer_config['target']} has not yet been ported to diffusers." - ) - assert original_content_embedding_config["target"] in PORTED_CONTENT_EMBEDDINGS, ( - f"{original_content_embedding_config['target']} has not yet been ported to diffusers." - ) + assert ( + original_diffusion_config["target"] in PORTED_DIFFUSIONS + ), f"{original_diffusion_config['target']} has not yet been ported to diffusers." + assert ( + original_transformer_config["target"] in PORTED_TRANSFORMERS + ), f"{original_transformer_config['target']} has not yet been ported to diffusers." + assert ( + original_content_embedding_config["target"] in PORTED_CONTENT_EMBEDDINGS + ), f"{original_content_embedding_config['target']} has not yet been ported to diffusers." original_diffusion_config = original_diffusion_config["params"] original_transformer_config = original_transformer_config["params"] diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py index 791b7ae9b14f..5ec16ff299eb 100644 --- a/src/diffusers/loaders/lora_conversion_utils.py +++ b/src/diffusers/loaders/lora_conversion_utils.py @@ -184,9 +184,9 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_ # Store DoRA scale if present. if dora_present_in_unet: dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down." - unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = ( - state_dict.pop(key.replace("lora_down.weight", "dora_scale")) - ) + unet_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) # Handle text encoder LoRAs. elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")): @@ -206,13 +206,13 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_ "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer." 
) if lora_name.startswith(("lora_te_", "lora_te1_")): - te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = ( - state_dict.pop(key.replace("lora_down.weight", "dora_scale")) - ) + te_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) elif lora_name.startswith("lora_te2_"): - te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = ( - state_dict.pop(key.replace("lora_down.weight", "dora_scale")) - ) + te2_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) # Store alpha if present. if lora_name_alpha in state_dict: @@ -1020,21 +1020,21 @@ def _convert_bfl_flux_control_lora_to_diffusers(original_state_dict): for lora_key in ["lora_A", "lora_B"]: ## time_text_embed.timestep_embedder <- time_in - converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"] = ( - original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight") - ) + converted_state_dict[ + f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight" + ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight") if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"] = ( - original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias") - ) + converted_state_dict[ + f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias" + ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias") - converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"] = ( - original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight") - ) + converted_state_dict[ + f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight" + ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight") if f"time_in.out_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"] = ( - original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias") - ) + converted_state_dict[ + f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias" + ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias") ## time_text_embed.text_embedder <- vector_in converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.weight"] = original_state_dict.pop( @@ -1056,21 +1056,21 @@ def _convert_bfl_flux_control_lora_to_diffusers(original_state_dict): # guidance has_guidance = any("guidance" in k for k in original_state_dict) if has_guidance: - converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"] = ( - original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight") - ) + converted_state_dict[ + f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight" + ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight") if f"guidance_in.in_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"] = ( - original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias") - ) + converted_state_dict[ + f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias" + ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias") - 
converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"] = ( - original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight") - ) + converted_state_dict[ + f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight" + ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight") if f"guidance_in.out_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"] = ( - original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias") - ) + converted_state_dict[ + f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias" + ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias") # context_embedder converted_state_dict[f"context_embedder.{lora_key}.weight"] = original_state_dict.pop( diff --git a/src/diffusers/models/transformers/transformer_2d.py b/src/diffusers/models/transformers/transformer_2d.py index 5515a7885098..a88ee6c9c9b8 100644 --- a/src/diffusers/models/transformers/transformer_2d.py +++ b/src/diffusers/models/transformers/transformer_2d.py @@ -211,9 +211,9 @@ def _init_continuous_input(self, norm_type): def _init_vectorized_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" - assert self.config.num_vector_embeds is not None, ( - "Transformer2DModel over discrete input must provide num_embed" - ) + assert ( + self.config.num_vector_embeds is not None + ), "Transformer2DModel over discrete input must provide num_embed" self.height = self.config.sample_size self.width = self.config.sample_size diff --git a/src/diffusers/pipelines/free_noise_utils.py b/src/diffusers/pipelines/free_noise_utils.py index 8ea5eb7dd575..dc0071a494e3 100644 --- a/src/diffusers/pipelines/free_noise_utils.py +++ b/src/diffusers/pipelines/free_noise_utils.py @@ -341,9 +341,9 @@ def _encode_prompt_free_noise( start_tensor = negative_prompt_embeds[i].unsqueeze(0) end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0) - negative_prompt_interpolation_embeds[start_frame : end_frame + 1] = ( - self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) - ) + negative_prompt_interpolation_embeds[ + start_frame : end_frame + 1 + ] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) prompt_embeds = prompt_interpolation_embeds negative_prompt_embeds = negative_prompt_interpolation_embeds diff --git a/src/diffusers/pipelines/omnigen/processor_omnigen.py b/src/diffusers/pipelines/omnigen/processor_omnigen.py index 40fac01f8f8a..75d272ac5140 100644 --- a/src/diffusers/pipelines/omnigen/processor_omnigen.py +++ b/src/diffusers/pipelines/omnigen/processor_omnigen.py @@ -95,13 +95,13 @@ def process_multi_modal_prompt(self, text, input_images): image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags] unique_image_ids = sorted(set(image_ids)) - assert unique_image_ids == list(range(1, len(unique_image_ids) + 1)), ( - f"image_ids must start from 1, and must be continuous int, e.g. [1, 2, 3], cannot be {unique_image_ids}" - ) + assert unique_image_ids == list( + range(1, len(unique_image_ids) + 1) + ), f"image_ids must start from 1, and must be continuous int, e.g. 
[1, 2, 3], cannot be {unique_image_ids}" # total images must be the same as the number of image tags - assert len(unique_image_ids) == len(input_images), ( - f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" - ) + assert ( + len(unique_image_ids) == len(input_images) + ), f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" input_images = [input_images[x - 1] for x in image_ids] diff --git a/src/diffusers/pipelines/shap_e/renderer.py b/src/diffusers/pipelines/shap_e/renderer.py index dd25945590cd..9d9f9d9b2ab1 100644 --- a/src/diffusers/pipelines/shap_e/renderer.py +++ b/src/diffusers/pipelines/shap_e/renderer.py @@ -983,9 +983,9 @@ def decode_to_mesh( fields = torch.cat(fields, dim=1) fields = fields.float() - assert len(fields.shape) == 3 and fields.shape[-1] == 1, ( - f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" - ) + assert ( + len(fields.shape) == 3 and fields.shape[-1] == 1 + ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" fields = fields.reshape(1, *([grid_size] * 3)) @@ -1039,9 +1039,9 @@ def decode_to_mesh( textures = textures.float() # 3.3 augument the mesh with texture data - assert len(textures.shape) == 3 and textures.shape[-1] == len(texture_channels), ( - f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" - ) + assert len(textures.shape) == 3 and textures.shape[-1] == len( + texture_channels + ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" for m, texture in zip(raw_meshes, textures): texture = texture[: len(m.verts)] diff --git a/src/diffusers/quantizers/base.py b/src/diffusers/quantizers/base.py index fa9ba98e6d0d..1c75b5bef933 100644 --- a/src/diffusers/quantizers/base.py +++ b/src/diffusers/quantizers/base.py @@ -215,15 +215,19 @@ def _dequantize(self, model): ) @abstractmethod - def _process_model_before_weight_loading(self, model, **kwargs): ... + def _process_model_before_weight_loading(self, model, **kwargs): + ... @abstractmethod - def _process_model_after_weight_loading(self, model, **kwargs): ... + def _process_model_after_weight_loading(self, model, **kwargs): + ... @property @abstractmethod - def is_serializable(self): ... + def is_serializable(self): + ... @property @abstractmethod - def is_trainable(self): ... + def is_trainable(self): + ... diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index f82a2407f333..6155ac2e39fd 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -299,9 +299,9 @@ def test_one_request_upon_cached(self): ) download_requests = [r.method for r in m.request_history] - assert download_requests.count("HEAD") == 3, ( - "3 HEAD requests one for config, one for model, and one for shard index file." - ) + assert ( + download_requests.count("HEAD") == 3 + ), "3 HEAD requests one for config, one for model, and one for shard index file." 
assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: @@ -313,9 +313,9 @@ def test_one_request_upon_cached(self): ) cache_requests = [r.method for r in m.request_history] - assert "HEAD" == cache_requests[0] and len(cache_requests) == 2, ( - "We should call only `model_info` to check for commit hash and knowing if shard index is present." - ) + assert ( + "HEAD" == cache_requests[0] and len(cache_requests) == 2 + ), "We should call only `model_info` to check for commit hash and knowing if shard index is present." def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: diff --git a/tests/models/transformers/test_models_transformer_sd3.py b/tests/models/transformers/test_models_transformer_sd3.py index bfef1fc4f09b..659d9a82fd76 100644 --- a/tests/models/transformers/test_models_transformer_sd3.py +++ b/tests/models/transformers/test_models_transformer_sd3.py @@ -92,9 +92,9 @@ def test_xformers_enable_works(self): model.enable_xformers_memory_efficient_attention() - assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( - "xformers is not enabled" - ) + assert ( + model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor" + ), "xformers is not enabled" @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. This test doesn't apply") def test_set_attn_processor_for_determinism(self): @@ -167,9 +167,9 @@ def test_xformers_enable_works(self): model.enable_xformers_memory_efficient_attention() - assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( - "xformers is not enabled" - ) + assert ( + model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor" + ), "xformers is not enabled" @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. 
This test doesn't apply") def test_set_attn_processor_for_determinism(self): diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py index d01a0b493520..8e1187f11468 100644 --- a/tests/models/unets/test_models_unet_2d_condition.py +++ b/tests/models/unets/test_models_unet_2d_condition.py @@ -654,22 +654,22 @@ def test_model_xattn_mask(self, mask_dtype): keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample - assert full_cond_keepallmask_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( - "a 'keep all' mask should give the same result as no mask" - ) + assert full_cond_keepallmask_out.allclose( + full_cond_out, rtol=1e-05, atol=1e-05 + ), "a 'keep all' mask should give the same result as no mask" trunc_cond = cond[:, :-1, :] trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample - assert not trunc_cond_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( - "discarding the last token from our cond should change the result" - ) + assert not trunc_cond_out.allclose( + full_cond_out, rtol=1e-05, atol=1e-05 + ), "discarding the last token from our cond should change the result" batch, tokens, _ = cond.shape mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample - assert masked_cond_out.allclose(trunc_cond_out, rtol=1e-05, atol=1e-05), ( - "masking the last token from our cond should be equivalent to truncating that token out of the condition" - ) + assert masked_cond_out.allclose( + trunc_cond_out, rtol=1e-05, atol=1e-05 + ), "masking the last token from our cond should be equivalent to truncating that token out of the condition" # see diffusers.models.attention_processor::Attention#prepare_attention_mask # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. @@ -697,9 +697,9 @@ def test_model_xattn_padding(self): trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample - assert trunc_mask_out.allclose(keeplast_out), ( - "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." - ) + assert trunc_mask_out.allclose( + keeplast_out + ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." def test_custom_diffusion_processors(self): # enable deterministic behavior for gradient checkpointing @@ -1114,12 +1114,12 @@ def test_load_attn_procs_raise_warning(self): with torch.no_grad(): lora_sample_2 = model(**inputs_dict).sample - assert not torch.allclose(non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4), ( - "LoRA injected UNet should produce different results." - ) - assert torch.allclose(lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4), ( - "Loading from a saved checkpoint should produce identical results." - ) + assert not torch.allclose( + non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4 + ), "LoRA injected UNet should produce different results." 
+ assert torch.allclose( + lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4 + ), "Loading from a saved checkpoint should produce identical results." @require_peft_backend def test_save_attn_procs_raise_warning(self): diff --git a/tests/others/test_image_processor.py b/tests/others/test_image_processor.py index 071194c59ead..3397ca9e394a 100644 --- a/tests/others/test_image_processor.py +++ b/tests/others/test_image_processor.py @@ -65,9 +65,9 @@ def test_vae_image_processor_pt(self): ) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np - assert np.abs(in_np - out_np).max() < 1e-6, ( - f"decoded output does not match input for output_type {output_type}" - ) + assert ( + np.abs(in_np - out_np).max() < 1e-6 + ), f"decoded output does not match input for output_type {output_type}" def test_vae_image_processor_np(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) @@ -78,9 +78,9 @@ def test_vae_image_processor_np(self): out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np - assert np.abs(in_np - out_np).max() < 1e-6, ( - f"decoded output does not match input for output_type {output_type}" - ) + assert ( + np.abs(in_np - out_np).max() < 1e-6 + ), f"decoded output does not match input for output_type {output_type}" def test_vae_image_processor_pil(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) @@ -93,9 +93,9 @@ def test_vae_image_processor_pil(self): for i, o in zip(input_pil, out): in_np = np.array(i) out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round() - assert np.abs(in_np - out_np).max() < 1e-6, ( - f"decoded output does not match input for output_type {output_type}" - ) + assert ( + np.abs(in_np - out_np).max() < 1e-6 + ), f"decoded output does not match input for output_type {output_type}" def test_preprocess_input_3d(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) @@ -293,9 +293,9 @@ def test_vae_image_processor_resize_pt(self): scale = 2 out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale) exp_pt_shape = (b, c, h // scale, w // scale) - assert out_pt.shape == exp_pt_shape, ( - f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." - ) + assert ( + out_pt.shape == exp_pt_shape + ), f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." def test_vae_image_processor_resize_np(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) @@ -305,6 +305,6 @@ def test_vae_image_processor_resize_np(self): input_np = self.to_np(input_pt) out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale) exp_np_shape = (b, h // scale, w // scale, c) - assert out_np.shape == exp_np_shape, ( - f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." - ) + assert ( + out_np.shape == exp_np_shape + ), f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." 
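All of the hunks in this second ("style") commit apply the same mechanical reversal: where the newer formatter wraps the assert message (or an assigned value) in parentheses, the restored style wraps the assert condition (or the subscript) instead. A minimal sketch of the two equivalent spellings, using hypothetical names and values:

# Hypothetical values, for illustration only. Both assert spellings below
# are functionally identical, so the commit changes formatting, not behavior.
value, expected = 3.14159, 3.14

# Newer formatter output (what the commit removes): the message is wrapped.
assert abs(value - expected) < 1e-2, (
    f"expected {expected}, but got {value}"
)

# Restored style (what the commit reinstates): the condition is wrapped and
# the message stays on the closing line.
assert (
    abs(value - expected) < 1e-2
), f"expected {expected}, but got {value}"

# Long subscript assignments get the analogous treatment: the subscript,
# not the right-hand side, spans multiple lines.
settings = {}
settings[
    "original_embeddings_0"
] = value

The remaining pattern, visible in src/diffusers/quantizers/base.py above and in the aMUSEd tests below, moves the "..." placeholder body of an abstract stub off the def line onto its own indented line.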
diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py index ac579bbf2be2..a0fbc5df1c28 100644 --- a/tests/pipelines/amused/test_amused.py +++ b/tests/pipelines/amused/test_amused.py @@ -126,7 +126,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py index 942735f15707..2699bbe7f56f 100644 --- a/tests/pipelines/amused/test_amused_img2img.py +++ b/tests/pipelines/amused/test_amused_img2img.py @@ -126,7 +126,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py index 541b988f1798..645379a7eab1 100644 --- a/tests/pipelines/amused/test_amused_inpaint.py +++ b/tests/pipelines/amused/test_amused_inpaint.py @@ -130,7 +130,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow diff --git a/tests/pipelines/aura_flow/test_pipeline_aura_flow.py b/tests/pipelines/aura_flow/test_pipeline_aura_flow.py index 1eb9d1035c33..c56aeb905ac3 100644 --- a/tests/pipelines/aura_flow/test_pipeline_aura_flow.py +++ b/tests/pipelines/aura_flow/test_pipeline_aura_flow.py @@ -106,9 +106,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -122,15 +122,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." 
- ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." @unittest.skip("xformers attention processor does not exist for AuraFlow") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/blipdiffusion/test_blipdiffusion.py b/tests/pipelines/blipdiffusion/test_blipdiffusion.py index db8d36b23a4b..e073f55aec9e 100644 --- a/tests/pipelines/blipdiffusion/test_blipdiffusion.py +++ b/tests/pipelines/blipdiffusion/test_blipdiffusion.py @@ -195,9 +195,9 @@ def test_blipdiffusion(self): [0.5329548, 0.8372512, 0.33269387, 0.82096875, 0.43657133, 0.3783, 0.5953028, 0.51934963, 0.42142007] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {image_slice.flatten()}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {image_slice.flatten()}, but got {image_slice.flatten()}" @unittest.skip("Test not supported because of complexities in deriving query_embeds.") def test_encode_prompt_works_in_isolation(self): diff --git a/tests/pipelines/cogvideo/test_cogvideox.py b/tests/pipelines/cogvideo/test_cogvideox.py index a9de0ff05fe8..388dc9ef7ec4 100644 --- a/tests/pipelines/cogvideo/test_cogvideox.py +++ b/tests/pipelines/cogvideo/test_cogvideox.py @@ -299,9 +299,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -315,15 +315,15 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." 
+ assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." @slow diff --git a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py index 4f32da7ac4ae..2e962bd247b9 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py +++ b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py @@ -299,9 +299,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -315,12 +315,12 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." diff --git a/tests/pipelines/cogvideo/test_cogvideox_image2video.py b/tests/pipelines/cogvideo/test_cogvideox_image2video.py index ec4e51bd1bad..cac47f1a83d4 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_image2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_image2video.py @@ -317,9 +317,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." 
@@ -333,15 +333,15 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." @slow diff --git a/tests/pipelines/cogvideo/test_cogvideox_video2video.py b/tests/pipelines/cogvideo/test_cogvideox_video2video.py index b1ac8cbd90ed..4d836cb5e2a4 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_video2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_video2video.py @@ -298,9 +298,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -314,12 +314,12 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." 
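Every hunk in this style pass applies the same mechanical rewrite: when an assert carries a failure message, the formatter wraps the condition in parentheses and keeps the message inline, rather than wrapping the message. A minimal sketch of the two layouts follows, using hypothetical arrays `a` and `b` rather than any test's real slices:

import numpy as np

a = np.zeros(9)  # stand-in for a generated image slice
b = np.zeros(9)  # stand-in for the expected reference slice

# Layout being removed: bare condition, message wrapped in parentheses.
assert np.allclose(a, b, atol=1e-3, rtol=1e-3), (
    "Fusion of QKV projections shouldn't affect the outputs."
)

# Layout being introduced: wrapped condition, message kept inline.
assert np.allclose(
    a, b, atol=1e-3, rtol=1e-3
), "Fusion of QKV projections shouldn't affect the outputs."

Both layouts are equivalent at runtime; only the line-wrapping differs.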
diff --git a/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py b/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py index a5768cb51fbf..eedda4e21722 100644 --- a/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py +++ b/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py @@ -219,9 +219,9 @@ def test_blipdiffusion_controlnet(self): assert image.shape == (1, 16, 16, 4) expected_slice = np.array([0.7953, 0.7136, 0.6597, 0.4779, 0.7389, 0.4111, 0.5826, 0.4150, 0.8422]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" @unittest.skip("Test not supported because of complexities in deriving query_embeds.") def test_encode_prompt_works_in_isolation(self): diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux.py b/tests/pipelines/controlnet_flux/test_controlnet_flux.py index 9ce62cde9fe4..9a270c2bbf07 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux.py @@ -178,9 +178,9 @@ def test_controlnet_flux(self): [0.47387695, 0.63134766, 0.5605469, 0.61621094, 0.7207031, 0.7089844, 0.70410156, 0.6113281, 0.64160156] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f"Expected: {expected_slice}, got: {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py index 8d63619c402b..59ccb9237819 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): original_image_slice = image[0, -3:, -3:, -1] pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." 
- ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py index 4bd7f59dc0a8..f7b3db05c8af 100644 --- a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py +++ b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py @@ -162,9 +162,9 @@ def test_controlnet_hunyuandit(self): [0.6953125, 0.89208984, 0.59375, 0.5078125, 0.5786133, 0.6035156, 0.5839844, 0.53564453, 0.52246094] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f"Expected: {expected_slice}, got: {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py index d9f5dcad7d61..2cd57ce56d52 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py @@ -194,9 +194,9 @@ def test_controlnet_inpaint_sd3(self): [0.51708984, 0.7421875, 0.4580078, 0.6435547, 0.65625, 0.43603516, 0.5151367, 0.65722656, 0.60839844] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f"Expected: {expected_slice}, got: {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py index 1be15645efd7..84ce09acbe1a 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py @@ -202,9 +202,9 @@ def run_pipe(self, components, use_sd35=False): else: expected_slice = np.array([1.0000, 0.9072, 0.4209, 0.2744, 0.5737, 0.3840, 0.6113, 0.6250, 0.6328]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f"Expected: {expected_slice}, got: {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" def test_controlnet_sd3(self): components = self.get_dummy_components() diff --git a/tests/pipelines/flux/test_pipeline_flux.py b/tests/pipelines/flux/test_pipeline_flux.py index 646ad928ec05..6a560367a5b8 100644 --- a/tests/pipelines/flux/test_pipeline_flux.py +++ b/tests/pipelines/flux/test_pipeline_flux.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once 
`fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/flux/test_pipeline_flux_control.py b/tests/pipelines/flux/test_pipeline_flux_control.py index d8d0774e1e32..d8293952adcb 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control.py +++ b/tests/pipelines/flux/test_pipeline_flux_control.py @@ -140,9 +140,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -156,15 +156,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." 
- ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py index a2f7c9171082..44ce2a4dedfc 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py +++ b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py @@ -134,9 +134,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -150,15 +150,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." 
def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/hunyuandit/test_hunyuan_dit.py b/tests/pipelines/hunyuandit/test_hunyuan_dit.py index 66453b73b0b3..5c3a2cbea7ba 100644 --- a/tests/pipelines/hunyuandit/test_hunyuan_dit.py +++ b/tests/pipelines/hunyuandit/test_hunyuan_dit.py @@ -174,9 +174,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." - ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -192,15 +192,15 @@ def test_fused_qkv_projections(self): image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." @unittest.skip( "Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."
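The kandinsky hunks below reflow the other recurring assert family, the element-wise slice comparison. A hypothetical sketch of the target layout (`image_slice` and `expected_slice` here are illustrative values, not taken from any test); note the message is meant to interpolate the expected values first and the observed values second:

import numpy as np

image_slice = np.full((3, 3), 0.5)   # stand-in for a pipeline output slice
expected_slice = np.full(9, 0.5)     # stand-in for the reference values

# Target layout: wrapped condition, inline message naming expected vs. got.
assert (
    np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"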
diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py index f4de6f3a5338..30144e37a9d4 100644 --- a/tests/pipelines/kandinsky/test_kandinsky.py +++ b/tests/pipelines/kandinsky/test_kandinsky.py @@ -240,12 +240,12 @@ def test_kandinsky(self): expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index f14a741d7dc1..c5f27a9cc9a9 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -98,12 +98,12 @@ def test_kandinsky(self): expected_slice = np.array([0.2893, 0.1464, 0.4603, 0.3529, 0.4612, 0.7701, 0.4027, 0.3051, 0.5155]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): @@ -206,12 +206,12 @@ def test_kandinsky(self): expected_slice = np.array([0.4852, 0.4136, 0.4539, 0.4781, 0.4680, 0.5217, 0.4973, 0.4089, 0.4977]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): @@ -318,12 +318,12 @@ def test_kandinsky(self): expected_slice = np.array([0.0320, 0.0860, 0.4013, 0.0518, 0.2484, 0.5847, 0.4411, 0.2321, 0.4593]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + 
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py index 169709978042..fee29fb150b7 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py @@ -261,12 +261,12 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py index d4d5c4e48f78..79faa95984a0 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ -256,12 +256,12 @@ def test_kandinsky_inpaint(self): expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky.py b/tests/pipelines/kandinsky2_2/test_kandinsky.py index aa17f6fc5d6b..fea49d47b7bb 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky.py @@ -210,13 +210,13 @@ def test_kandinsky(self): expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_from_tuple_slice.flatten() - 
expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py index 17ef3dc2601e..90f8b2034109 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py @@ -103,12 +103,12 @@ def test_kandinsky(self): expected_slice = np.array([0.3076, 0.2729, 0.5668, 0.0522, 0.3384, 0.7028, 0.4908, 0.3659, 0.6243]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): @@ -227,12 +227,12 @@ def test_kandinsky(self): expected_slice = np.array([0.4445, 0.4287, 0.4596, 0.3919, 0.3730, 0.5039, 0.4834, 0.4269, 0.5521]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): @@ -350,12 +350,12 @@ def test_kandinsky(self): expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py index 10a95d6177b2..1f3219e0d69e 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py @@ -210,13 +210,13 @@ def test_kandinsky_controlnet(self): [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice 
{expected_slice}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py index 58fbbecc0569..8613f5acf045 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py @@ -218,12 +218,12 @@ def test_kandinsky_controlnet_img2img(self): expected_slice = np.array( [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1.75e-3) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py index aa7589a212eb..e0d90071f111 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py @@ -228,12 +228,12 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py index d7ac69820761..c9c5f4155828 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py @@ -234,12 +234,12 @@ def test_kandinsky_inpaint(self): [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 
0.48327848] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert ( + np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) diff --git a/tests/pipelines/kandinsky3/test_kandinsky3.py b/tests/pipelines/kandinsky3/test_kandinsky3.py index c54b91f024af..af1d45ff8975 100644 --- a/tests/pipelines/kandinsky3/test_kandinsky3.py +++ b/tests/pipelines/kandinsky3/test_kandinsky3.py @@ -157,9 +157,9 @@ def test_kandinsky3(self): expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py index 088c32e2860e..e00948621a06 100644 --- a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py +++ b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py @@ -181,9 +181,9 @@ def test_kandinsky3_img2img(self): [0.576259, 0.6132097, 0.41703486, 0.603196, 0.62062526, 0.4655338, 0.5434324, 0.5660727, 0.65433365] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( - f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py index b9ce29c70bdf..6fa96275406f 100644 --- a/tests/pipelines/pag/test_pag_animatediff.py +++ b/tests/pipelines/pag/test_pag_animatediff.py @@ -450,9 +450,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
out = pipe_sd(**inputs).frames[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_controlnet_sd.py b/tests/pipelines/pag/test_pag_controlnet_sd.py index 02232c7379bd..ee97b0507a34 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sd.py +++ b/tests/pipelines/pag/test_pag_controlnet_sd.py @@ -169,9 +169,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py index cfc0b218d2e4..25ef5d253d68 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py +++ b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py @@ -165,9 +165,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__calss__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl.py b/tests/pipelines/pag/test_pag_controlnet_sdxl.py index 10adff7fe0a6..0588e26286a8 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sdxl.py +++ b/tests/pipelines/pag/test_pag_controlnet_sdxl.py @@ -187,9 +187,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py index fe4b615f646b..63c7d9fbee2d 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py +++ b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py @@ -189,9 +189,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_hunyuan_dit.py b/tests/pipelines/pag/test_pag_hunyuan_dit.py index d6cfbbed9e95..31cd9aa666de 100644 --- a/tests/pipelines/pag/test_pag_hunyuan_dit.py +++ b/tests/pipelines/pag/test_pag_hunyuan_dit.py @@ -177,15 +177,15 @@ def test_fused_qkv_projections(self): image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -198,9 +198,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_kolors.py b/tests/pipelines/pag/test_pag_kolors.py index c9f197b703ef..9a4f1daa2c05 100644 --- a/tests/pipelines/pag/test_pag_kolors.py +++ b/tests/pipelines/pag/test_pag_kolors.py @@ -140,9 +140,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_pixart_sigma.py b/tests/pipelines/pag/test_pag_pixart_sigma.py index 624b57844390..63f42416dbca 100644 --- a/tests/pipelines/pag/test_pag_pixart_sigma.py +++ b/tests/pipelines/pag/test_pag_pixart_sigma.py @@ -120,9 +120,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe.__class__.__name__}." 
- ) + assert ( + "pag_scale" not in inspect.signature(pipe.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe.__class__.__name__}." out = pipe(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_sana.py b/tests/pipelines/pag/test_pag_sana.py index ee1e359383e9..a2c657297860 100644 --- a/tests/pipelines/pag/test_pag_sana.py +++ b/tests/pipelines/pag/test_pag_sana.py @@ -268,9 +268,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_sd.py b/tests/pipelines/pag/test_pag_sd.py index bc20226873f6..d4cf00b034ff 100644 --- a/tests/pipelines/pag/test_pag_sd.py +++ b/tests/pipelines/pag/test_pag_sd.py @@ -154,9 +154,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -328,9 +328,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" def test_pag_uncond(self): pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -345,6 +345,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" diff --git a/tests/pipelines/pag/test_pag_sd3.py b/tests/pipelines/pag/test_pag_sd3.py index 737e238e5fbf..41ff0c3c09f4 100644 --- a/tests/pipelines/pag/test_pag_sd3.py +++ b/tests/pipelines/pag/test_pag_sd3.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist(pipe.transformer), ( - "Something wrong with the fused attention processors. Expected all the attention processors to be fused." 
- ) + assert check_qkv_fusion_processors_exist( + pipe.transformer + ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( - "Fusion of QKV projections shouldn't affect the outputs." - ) - assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( - "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - ) - assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( - "Original outputs should match when fused QKV projections are disabled." - ) + assert np.allclose( + original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 + ), "Fusion of QKV projections shouldn't affect the outputs." + assert np.allclose( + image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 + ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + assert np.allclose( + original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 + ), "Original outputs should match when fused QKV projections are disabled." def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -207,9 +207,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_sd3_img2img.py b/tests/pipelines/pag/test_pag_sd3_img2img.py index fe593d47dc75..2fe988929185 100644 --- a/tests/pipelines/pag/test_pag_sd3_img2img.py +++ b/tests/pipelines/pag/test_pag_sd3_img2img.py @@ -149,9 +149,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() @@ -254,9 +254,9 @@ def test_pag_cfg(self): 0.17822266, ] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained( @@ -272,6 +272,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.1508789, 0.16210938, 0.17138672, 0.16210938, 0.17089844, 0.16137695, 0.16235352, 0.16430664, 0.16455078] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" diff --git a/tests/pipelines/pag/test_pag_sd_img2img.py b/tests/pipelines/pag/test_pag_sd_img2img.py index ef70985571c9..d000493d6bd1 100644 --- a/tests/pipelines/pag/test_pag_sd_img2img.py +++ b/tests/pipelines/pag/test_pag_sd_img2img.py @@ -161,9 +161,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -267,9 +267,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -285,6 +285,6 @@ def test_pag_uncond(self): [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" diff --git a/tests/pipelines/pag/test_pag_sd_inpaint.py b/tests/pipelines/pag/test_pag_sd_inpaint.py index 04ec8b216551..06682c111d37 100644 --- a/tests/pipelines/pag/test_pag_sd_inpaint.py +++ b/tests/pipelines/pag/test_pag_sd_inpaint.py @@ -302,9 +302,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" def test_pag_uncond(self): pipeline = 
AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -319,6 +319,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.3876953, 0.40356445, 0.4934082, 0.39697266, 0.41674805, 0.41015625, 0.375, 0.36914062, 0.40649414] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" diff --git a/tests/pipelines/pag/test_pag_sdxl.py b/tests/pipelines/pag/test_pag_sdxl.py index fc4ce1067f76..b35b2b1d2f7e 100644 --- a/tests/pipelines/pag/test_pag_sdxl.py +++ b/tests/pipelines/pag/test_pag_sdxl.py @@ -167,9 +167,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -331,9 +331,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.3123679, 0.31725878, 0.32026544, 0.327533, 0.3266391, 0.3303998, 0.33544615, 0.34181812, 0.34102726] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" def test_pag_uncond(self): pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -348,6 +348,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.47400922, 0.48650584, 0.4839625, 0.4724013, 0.4890427, 0.49544555, 0.51707107, 0.54299414, 0.5224372] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( - f"output is different from expected, {image_slice.flatten()}" - ) + assert ( + np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + ), f"output is different from expected, {image_slice.flatten()}" diff --git a/tests/pipelines/pag/test_pag_sdxl_img2img.py b/tests/pipelines/pag/test_pag_sdxl_img2img.py index 0e5c2cc7f93a..c94a6836de7f 100644 --- a/tests/pipelines/pag/test_pag_sdxl_img2img.py +++ b/tests/pipelines/pag/test_pag_sdxl_img2img.py @@ -215,9 +215,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( - f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." - ) + assert ( + "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters + ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
         out = pipe_sd(**inputs).images[0, -3:, -3:, -1]

         # pag disabled with pag_scale=0.0
@@ -316,9 +316,9 @@ def test_pag_cfg(self):
         expected_slice = np.array(
             [0.20301354, 0.21078318, 0.2021082, 0.20277798, 0.20681083, 0.19562206, 0.20121682, 0.21562952, 0.21277016]
         )
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, (
-            f"output is different from expected, {image_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
+        ), f"output is different from expected, {image_slice.flatten()}"

     def test_pag_uncond(self):
         pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16)
@@ -333,6 +333,6 @@ def test_pag_uncond(self):
         expected_slice = np.array(
             [0.21303111, 0.22188407, 0.2124992, 0.21365267, 0.18823743, 0.17569828, 0.21113116, 0.19419771, 0.18919235]
         )
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, (
-            f"output is different from expected, {image_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
+        ), f"output is different from expected, {image_slice.flatten()}"
diff --git a/tests/pipelines/pag/test_pag_sdxl_inpaint.py b/tests/pipelines/pag/test_pag_sdxl_inpaint.py
index 854c65cbc761..cca5292288b0 100644
--- a/tests/pipelines/pag/test_pag_sdxl_inpaint.py
+++ b/tests/pipelines/pag/test_pag_sdxl_inpaint.py
@@ -220,9 +220,9 @@ def test_pag_disable_enable(self):
         inputs = self.get_dummy_inputs(device)
         del inputs["pag_scale"]

-        assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, (
-            f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
-        )
+        assert (
+            "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters
+        ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
         out = pipe_sd(**inputs).images[0, -3:, -3:, -1]

         # pag disabled with pag_scale=0.0
@@ -322,9 +322,9 @@ def test_pag_cfg(self):
         expected_slice = np.array(
             [0.41385046, 0.39608297, 0.4360491, 0.26872507, 0.32187328, 0.4242474, 0.2603805, 0.34167895, 0.46561807]
         )
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, (
-            f"output is different from expected, {image_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
+        ), f"output is different from expected, {image_slice.flatten()}"

     def test_pag_uncond(self):
         pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16)
@@ -339,6 +339,6 @@ def test_pag_uncond(self):
         expected_slice = np.array(
             [0.41597816, 0.39302617, 0.44287828, 0.2687074, 0.28315824, 0.40582314, 0.20877528, 0.2380802, 0.39447647]
         )
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, (
-            f"output is different from expected, {image_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
+        ), f"output is different from expected, {image_slice.flatten()}"
diff --git a/tests/pipelines/pixart_sigma/test_pixart.py b/tests/pipelines/pixart_sigma/test_pixart.py
index 7084fc9bcec8..b220afcfc25a 100644
--- a/tests/pipelines/pixart_sigma/test_pixart.py
+++ b/tests/pipelines/pixart_sigma/test_pixart.py
@@ -260,9 +260,9 @@ def test_fused_qkv_projections(self):
         # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added
         # to the pipeline level.
         pipe.transformer.fuse_qkv_projections()
-        assert check_qkv_fusion_processors_exist(pipe.transformer), (
-            "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-        )
+        assert check_qkv_fusion_processors_exist(
+            pipe.transformer
+        ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
         assert check_qkv_fusion_matches_attn_procs_length(
             pipe.transformer, pipe.transformer.original_attn_processors
         ), "Something wrong with the attention processors concerning the fused QKV projections."
@@ -276,15 +276,15 @@ def test_fused_qkv_projections(self):
         image = pipe(**inputs).images
         image_slice_disabled = image[0, -3:, -3:, -1]

-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."


 @slow
diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
index d433a461bd9d..1765f3a02242 100644
--- a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
+++ b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py
@@ -198,12 +198,12 @@ def test_stable_cascade(self):
         assert image.shape == (1, 128, 128, 3)

         expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0])
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
-        )
-        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+        assert (
+            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

     @require_torch_accelerator
     def test_offloads(self):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
index 3b5c7a24b4ca..6e17b86639ea 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -293,15 +293,15 @@ def test_stable_diffusion_ays(self):
         inputs["sigmas"] = sigma_schedule
         output_sigmas = sd_pipe(**inputs).images

-        assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, (
-            "ays timesteps and ays sigmas should have the same outputs"
-        )
-        assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, (
-            "use ays timesteps should have different outputs"
-        )
-        assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, (
-            "use ays sigmas should have different outputs"
-        )
+        assert (
+            np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3
+        ), "ays timesteps and ays sigmas should have the same outputs"
+        assert (
+            np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3
+        ), "use ays timesteps should have different outputs"
+        assert (
+            np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3
+        ), "use ays sigmas should have different outputs"

     def test_stable_diffusion_prompt_embeds(self):
         components = self.get_dummy_components()
@@ -656,9 +656,9 @@ def test_freeu_enabled(self):
         sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
         output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images

-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
+        assert not np.allclose(
+            output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]
+        ), "Enabling of FreeU should lead to different results."

     def test_freeu_disabled(self):
         components = self.get_dummy_components()
@@ -681,9 +681,9 @@ def test_freeu_disabled(self):
             prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)
         ).images

-        assert np.allclose(output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1]), (
-            "Disabling of FreeU should lead to results similar to the default pipeline results."
-        )
+        assert np.allclose(
+            output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1]
+        ), "Disabling of FreeU should lead to results similar to the default pipeline results."

     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -706,15 +706,15 @@ def test_fused_qkv_projections(self):
         image = sd_pipe(**inputs).images
         image_slice_disabled = image[0, -3:, -3:, -1]

-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."

     def test_pipeline_interrupt(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
index 8e2fa77fc083..38ef6143f4c0 100644
--- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
+++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
@@ -171,9 +171,9 @@ def test_fused_qkv_projections(self):
         # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added
         # to the pipeline level.
         pipe.transformer.fuse_qkv_projections()
-        assert check_qkv_fusion_processors_exist(pipe.transformer), (
-            "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-        )
+        assert check_qkv_fusion_processors_exist(
+            pipe.transformer
+        ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
         assert check_qkv_fusion_matches_attn_procs_length(
             pipe.transformer, pipe.transformer.original_attn_processors
         ), "Something wrong with the attention processors concerning the fused QKV projections."
@@ -187,15 +187,15 @@ def test_fused_qkv_projections(self):
         image = pipe(**inputs).images
         image_slice_disabled = image[0, -3:, -3:, -1]

-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."

     def test_skip_guidance_layers(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
index a41e7dc7f342..c68cdf67036a 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
@@ -242,15 +242,15 @@ def test_stable_diffusion_ays(self):
         inputs["sigmas"] = sigma_schedule
         output_sigmas = sd_pipe(**inputs).images

-        assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, (
-            "ays timesteps and ays sigmas should have the same outputs"
-        )
-        assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, (
-            "use ays timesteps should have different outputs"
-        )
-        assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, (
-            "use ays sigmas should have different outputs"
-        )
+        assert (
+            np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3
+        ), "ays timesteps and ays sigmas should have the same outputs"
+        assert (
+            np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3
+        ), "use ays timesteps should have different outputs"
+        assert (
+            np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3
+        ), "use ays sigmas should have different outputs"

     def test_ip_adapter(self):
         expected_pipe_slice = None
@@ -742,9 +742,9 @@ def new_step(self, *args, **kwargs):
             inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
             latents = pipe_1(**inputs_1).images[0]

-            assert expected_steps_1 == done_steps, (
-                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-            )
+            assert (
+                expected_steps_1 == done_steps
+            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"

             with self.assertRaises(ValueError) as cm:
                 inputs_2 = {
@@ -771,9 +771,9 @@ def new_step(self, *args, **kwargs):
             pipe_3(**inputs_3).images[0]

             assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-            assert expected_steps == done_steps, (
-                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-            )
+            assert (
+                expected_steps == done_steps
+            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"

         for steps in [7, 11, 20]:
             for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
index 729c6981d2b5..66ae581a0529 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
@@ -585,9 +585,9 @@ def new_step(self, *args, **kwargs):
             inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
             latents = pipe_1(**inputs_1).images[0]

-            assert expected_steps_1 == done_steps, (
-                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-            )
+            assert (
+                expected_steps_1 == done_steps
+            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"

             inputs_2 = {
                 **inputs,
@@ -601,9 +601,9 @@ def new_step(self, *args, **kwargs):
             pipe_3(**inputs_3).images[0]

             assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-            assert expected_steps == done_steps, (
-                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
-            )
+            assert (
+                expected_steps == done_steps
+            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"

         for steps in [7, 11, 20]:
             for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py
index 00c7636ed9fd..ae5a12e04ba8 100644
--- a/tests/pipelines/test_pipelines.py
+++ b/tests/pipelines/test_pipelines.py
@@ -167,9 +167,9 @@ def test_one_request_upon_cached(self):
             download_requests = [r.method for r in m.request_history]
             assert download_requests.count("HEAD") == 15, "15 calls to files"
             assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json"
-            assert len(download_requests) == 32, (
-                "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"
-            )
+            assert (
+                len(download_requests) == 32
+            ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"

         with requests_mock.mock(real_http=True) as m:
             DiffusionPipeline.download(
@@ -179,9 +179,9 @@ def test_one_request_upon_cached(self):
             cache_requests = [r.method for r in m.request_history]
             assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
             assert cache_requests.count("GET") == 1, "model info is only GET"
-            assert len(cache_requests) == 2, (
-                "We should call only `model_info` to check for _commit hash and `send_telemetry`"
-            )
+            assert (
+                len(cache_requests) == 2
+            ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"

     def test_less_downloads_passed_object(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
@@ -217,9 +217,9 @@ def test_less_downloads_passed_object_calls(self):
             assert download_requests.count("HEAD") == 13, "13 calls to files"
             # 17 - 2 because no call to config or model file for `safety_checker`
             assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json"
-            assert len(download_requests) == 28, (
-                "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"
-            )
+            assert (
+                len(download_requests) == 28
+            ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"

         with requests_mock.mock(real_http=True) as m:
             DiffusionPipeline.download(
@@ -229,9 +229,9 @@ def test_less_downloads_passed_object_calls(self):
             cache_requests = [r.method for r in m.request_history]
             assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
             assert cache_requests.count("GET") == 1, "model info is only GET"
-            assert len(cache_requests) == 2, (
-                "We should call only `model_info` to check for _commit hash and `send_telemetry`"
-            )
+            assert (
+                len(cache_requests) == 2
+            ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"

     def test_download_only_pytorch(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index be5245796b35..b69669464d90 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -191,12 +191,12 @@ def test_freeu(self):
         inputs["output_type"] = "np"
         output_no_freeu = pipe(**inputs)[0]

-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
-        assert np.allclose(output, output_no_freeu, atol=1e-2), (
-            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
-        )
+        assert not np.allclose(
+            output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]
+        ), "Enabling of FreeU should lead to different results."
+        assert np.allclose(
+            output, output_no_freeu, atol=1e-2
+        ), f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."

     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -217,12 +217,12 @@ def test_fused_qkv_projections(self):
                 and hasattr(component, "original_attn_processors")
                 and component.original_attn_processors is not None
             ):
-                assert check_qkv_fusion_processors_exist(component), (
-                    "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-                )
-                assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), (
-                    "Something wrong with the attention processors concerning the fused QKV projections."
-                )
+                assert check_qkv_fusion_processors_exist(
+                    component
+                ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
+                assert check_qkv_fusion_matches_attn_procs_length(
+                    component, component.original_attn_processors
+                ), "Something wrong with the attention processors concerning the fused QKV projections."

         inputs = self.get_dummy_inputs(device)
         inputs["return_dict"] = False
@@ -235,15 +235,15 @@ def test_fused_qkv_projections(self):
         image_disabled = pipe(**inputs)[0]
         image_slice_disabled = image_disabled[0, -3:, -3:, -1]

-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."


 class IPAdapterTesterMixin:
@@ -909,9 +909,9 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):

         for component in pipe_original.components.values():
             if hasattr(component, "attn_processors"):
-                assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), (
-                    "`from_pipe` changed the attention processor in original pipeline."
-                )
+                assert all(
+                    type(proc) == AttnProcessor for proc in component.attn_processors.values()
+                ), "`from_pipe` changed the attention processor in original pipeline."

     @require_accelerator
     @require_accelerate_version_greater("0.14.0")
@@ -2569,12 +2569,12 @@ def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2)
         image_slice_pab_disabled = output.flatten()
         image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:]))

-        assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), (
-            "PAB outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_pab_enabled, atol=expected_atol
+        ), "PAB outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_pab_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."


 class FasterCacheTesterMixin:
@@ -2639,12 +2639,12 @@ def run_forward(pipe):
         output = run_forward(pipe).flatten()
         image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:]))

-        assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), (
-            "FasterCache outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol
+        ), "FasterCache outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."

     def test_faster_cache_state(self):
         from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
diff --git a/tests/pipelines/wuerstchen/test_wuerstchen_combined.py b/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
index fa544c91f2d9..084d62a8c613 100644
--- a/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
+++ b/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
@@ -191,12 +191,12 @@ def test_wuerstchen(self):

         expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898])

-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
-        )
-        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+        assert (
+            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

     @require_torch_accelerator
     def test_offloads(self):
diff --git a/tests/schedulers/test_scheduler_dpm_multi.py b/tests/schedulers/test_scheduler_dpm_multi.py
index 28c354709dc9..55b3202ad0be 100644
--- a/tests/schedulers/test_scheduler_dpm_multi.py
+++ b/tests/schedulers/test_scheduler_dpm_multi.py
@@ -357,9 +357,9 @@ def test_custom_timesteps(self):
                         prediction_type=prediction_type,
                         final_sigmas_type=final_sigmas_type,
                     )
-                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
-                        f"Scheduler outputs are not identical for algorithm_type: {algorithm_type}, prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
-                    )
+                    assert (
+                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
+                    ), f"Scheduler outputs are not identical for algorithm_type: {algorithm_type}, prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"

     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_dpm_single.py b/tests/schedulers/test_scheduler_dpm_single.py
index 0756a5ed71ff..7cbaa5cc5e8d 100644
--- a/tests/schedulers/test_scheduler_dpm_single.py
+++ b/tests/schedulers/test_scheduler_dpm_single.py
@@ -345,9 +345,9 @@ def test_custom_timesteps(self):
                         lower_order_final=lower_order_final,
                         final_sigmas_type=final_sigmas_type,
                     )
-                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
-                        f"Scheduler outputs are not identical for prediction_type: {prediction_type}, lower_order_final: {lower_order_final} and final_sigmas_type: {final_sigmas_type}"
-                    )
+                    assert (
+                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
+                    ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, lower_order_final: {lower_order_final} and final_sigmas_type: {final_sigmas_type}"

     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py b/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
index 8525ce61c40d..e97d64ec5f1d 100644
--- a/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
+++ b/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
@@ -188,9 +188,9 @@ def test_solver_order_and_type(self):
                             prediction_type=prediction_type,
                             algorithm_type=algorithm_type,
                         )
-                        assert not torch.isnan(sample).any(), (
-                            f"Samples have nan numbers, {order}, {solver_type}, {prediction_type}, {algorithm_type}"
-                        )
+                        assert (
+                            not torch.isnan(sample).any()
+                        ), f"Samples have nan numbers, {order}, {solver_type}, {prediction_type}, {algorithm_type}"

     def test_lower_order_final(self):
         self.check_over_configs(lower_order_final=True)
diff --git a/tests/schedulers/test_scheduler_euler.py b/tests/schedulers/test_scheduler_euler.py
index 01e173a631cd..4c7e02442cd0 100644
--- a/tests/schedulers/test_scheduler_euler.py
+++ b/tests/schedulers/test_scheduler_euler.py
@@ -245,9 +245,9 @@ def test_custom_timesteps(self):
                         interpolation_type=interpolation_type,
                         final_sigmas_type=final_sigmas_type,
                     )
-                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
-                        f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}"
-                    )
+                    assert (
+                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
+                    ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}"

     def test_custom_sigmas(self):
         for prediction_type in ["epsilon", "sample", "v_prediction"]:
@@ -260,9 +260,9 @@ def test_custom_sigmas(self):
                     prediction_type=prediction_type,
                     final_sigmas_type=final_sigmas_type,
                 )
-                assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
-                    f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
-                )
+                assert (
+                    torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
+                ), f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"

     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_heun.py b/tests/schedulers/test_scheduler_heun.py
index 90012f5525ab..9e060c6d476f 100644
--- a/tests/schedulers/test_scheduler_heun.py
+++ b/tests/schedulers/test_scheduler_heun.py
@@ -216,9 +216,9 @@ def test_custom_timesteps(self):
                     prediction_type=prediction_type,
                     timestep_spacing=timestep_spacing,
                 )
-                assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
-                    f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}"
-                )
+                assert (
+                    torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
+                ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}"

     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/single_file/single_file_testing_utils.py b/tests/single_file/single_file_testing_utils.py
index 4e1713c9ceb1..4e7bc0af6842 100644
--- a/tests/single_file/single_file_testing_utils.py
+++ b/tests/single_file/single_file_testing_utils.py
@@ -72,9 +72,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 continue

             assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
-            assert isinstance(component, pipe.components[component_name].__class__), (
-                f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
-            )
+            assert isinstance(
+                component, pipe.components[component_name].__class__
+            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"

             for param_name, param_value in component.config.items():
                 if param_name in PARAMS_TO_IGNORE:
@@ -85,9 +85,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                     pipe.components[component_name].config[param_name] = param_value

-                assert pipe.components[component_name].config[param_name] == param_value, (
-                    f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
-                )
+                assert (
+                    pipe.components[component_name].config[param_name] == param_value
+                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"

     def test_single_file_components(self, pipe=None, single_file_pipe=None):
         single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
@@ -253,9 +253,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 continue

             assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
-            assert isinstance(component, pipe.components[component_name].__class__), (
-                f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
-            )
+            assert isinstance(
+                component, pipe.components[component_name].__class__
+            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"

             for param_name, param_value in component.config.items():
                 if param_name in PARAMS_TO_IGNORE:
@@ -266,9 +266,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                     pipe.components[component_name].config[param_name] = param_value

-                assert pipe.components[component_name].config[param_name] == param_value, (
-                    f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
-                )
+                assert (
+                    pipe.components[component_name].config[param_name] == param_value
+                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"

     def test_single_file_components(self, pipe=None, single_file_pipe=None):
         single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
diff --git a/tests/single_file/test_lumina2_transformer.py b/tests/single_file/test_lumina2_transformer.py
index d3ffd4fc3a55..78e68c4c2df0 100644
--- a/tests/single_file/test_lumina2_transformer.py
+++ b/tests/single_file/test_lumina2_transformer.py
@@ -60,9 +60,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths:
diff --git a/tests/single_file/test_model_autoencoder_dc_single_file.py b/tests/single_file/test_model_autoencoder_dc_single_file.py
index 31b2eb6e36b0..b1faeb78776b 100644
--- a/tests/single_file/test_model_autoencoder_dc_single_file.py
+++ b/tests/single_file/test_model_autoencoder_dc_single_file.py
@@ -87,9 +87,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_in_type_variant_components(self):
         # `in` variant checkpoints require passing in a `config` parameter
@@ -106,9 +106,9 @@ def test_single_file_in_type_variant_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_mix_type_variant_components(self):
         repo_id = "mit-han-lab/dc-ae-f128c512-mix-1.0-diffusers"
@@ -121,6 +121,6 @@ def test_single_file_mix_type_variant_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"
diff --git a/tests/single_file/test_model_controlnet_single_file.py b/tests/single_file/test_model_controlnet_single_file.py
index 3580d73531a3..bfcb802380a6 100644
--- a/tests/single_file/test_model_controlnet_single_file.py
+++ b/tests/single_file/test_model_controlnet_single_file.py
@@ -58,9 +58,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_single_file_arguments(self):
         model_default = self.model_class.from_single_file(self.ckpt_path)
diff --git a/tests/single_file/test_model_flux_transformer_single_file.py b/tests/single_file/test_model_flux_transformer_single_file.py
index bf11faaa9c0e..0ec97db26a9e 100644
--- a/tests/single_file/test_model_flux_transformer_single_file.py
+++ b/tests/single_file/test_model_flux_transformer_single_file.py
@@ -58,9 +58,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths:
diff --git a/tests/single_file/test_model_motion_adapter_single_file.py b/tests/single_file/test_model_motion_adapter_single_file.py
index a747f16dc1db..b195f25d094b 100644
--- a/tests/single_file/test_model_motion_adapter_single_file.py
+++ b/tests/single_file/test_model_motion_adapter_single_file.py
@@ -40,9 +40,9 @@ def test_single_file_components_version_v1_5(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_components_version_v1_5_2(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
@@ -55,9 +55,9 @@ def test_single_file_components_version_v1_5_2(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_components_version_v1_5_3(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
@@ -70,9 +70,9 @@ def test_single_file_components_version_v1_5_3(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_components_version_sdxl_beta(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
@@ -85,6 +85,6 @@ def test_single_file_components_version_sdxl_beta(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"
diff --git a/tests/single_file/test_model_sd_cascade_unet_single_file.py b/tests/single_file/test_model_sd_cascade_unet_single_file.py
index 92b371c3fb41..08b04e3cd7e8 100644
--- a/tests/single_file/test_model_sd_cascade_unet_single_file.py
+++ b/tests/single_file/test_model_sd_cascade_unet_single_file.py
@@ -60,9 +60,9 @@ def test_single_file_components_stage_b(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_single_file_components_stage_b_lite(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -77,9 +77,9 @@ def test_single_file_components_stage_b_lite(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_single_file_components_stage_c(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -94,9 +94,9 @@ def test_single_file_components_stage_c(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_single_file_components_stage_c_lite(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -111,6 +111,6 @@ def test_single_file_components_stage_c_lite(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"
diff --git a/tests/single_file/test_model_vae_single_file.py b/tests/single_file/test_model_vae_single_file.py
index bba1726ae380..9db4cddb3c9d 100644
--- a/tests/single_file/test_model_vae_single_file.py
+++ b/tests/single_file/test_model_vae_single_file.py
@@ -91,9 +91,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between pretrained loading and single file loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between pretrained loading and single file loading"

     def test_single_file_arguments(self):
         model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
diff --git a/tests/single_file/test_model_wan_autoencoder_single_file.py b/tests/single_file/test_model_wan_autoencoder_single_file.py
index 7f0e1c1a4b0b..f5720ddd3964 100644
--- a/tests/single_file/test_model_wan_autoencoder_single_file.py
+++ b/tests/single_file/test_model_wan_autoencoder_single_file.py
@@ -56,6 +56,6 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"
diff --git a/tests/single_file/test_model_wan_transformer3d_single_file.py b/tests/single_file/test_model_wan_transformer3d_single_file.py
index 36f0919cacb5..9b938aa1754c 100644
--- a/tests/single_file/test_model_wan_transformer3d_single_file.py
+++ b/tests/single_file/test_model_wan_transformer3d_single_file.py
@@ -57,9 +57,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"


 @require_big_gpu_with_torch_cuda
@@ -88,6 +88,6 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"
diff --git a/tests/single_file/test_sana_transformer.py b/tests/single_file/test_sana_transformer.py
index 802ca37abfc3..7695e1577711 100644
--- a/tests/single_file/test_sana_transformer.py
+++ b/tests/single_file/test_sana_transformer.py
@@ -47,9 +47,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert model.config[param_name] == param_value, (
-                f"{param_name} differs between single file loading and pretrained loading"
-            )
+            assert (
+                model.config[param_name] == param_value
+            ), f"{param_name} differs between single file loading and pretrained loading"

     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths:
diff --git a/utils/fetch_latest_release_branch.py b/utils/fetch_latest_release_branch.py
index ba5ab70846a6..41d5c472cbb9 100644
--- a/utils/fetch_latest_release_branch.py
+++ b/utils/fetch_latest_release_branch.py
@@ -17,6 +17,7 @@
 import requests
 from packaging.version import parse

+
 # GitHub repository details
 USER = "huggingface"
 REPO = "diffusers"

From 6fcec9d85aa2b1f9bc26e1537e086c45cd014c1c Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Wed, 9 Apr 2025 17:24:51 +0530
Subject: [PATCH 3/3] fix

---
 .../train_dreambooth_lora_flux_advanced.py    |  6 +-
 .../train_dreambooth_lora_sd15_advanced.py    | 12 ++--
 .../train_dreambooth_lora_sdxl_advanced.py    | 12 ++--
 examples/community/pipeline_prompt2prompt.py  | 12 ++--
 .../train_custom_diffusion.py                 | 24 +++----
 .../flux-control/train_control_lora_flux.py   |  6 +-
 examples/model_search/pipeline_easy.py        |  6 +-
 examples/research_projects/anytext/anytext.py |  6 +-
 .../anytext/ocr_recog/RecSVTR.py              |  6 +-
 .../train_multi_subject_dreambooth.py         | 12 ++--
 .../textual_inversion.py                      |  6 +-
 .../textual_inversion/textual_inversion.py    |  6 +-
 .../textual_inversion/textual_inversion.py    |  6 +-
 .../textual_inversion_sdxl.py                 | 12 ++--
 examples/vqgan/train_vqgan.py                 | 12 ++--
 .../convert_dance_diffusion_to_diffusers.py   | 12 ++--
 ...vert_hunyuandit_controlnet_to_diffusers.py |  6 +-
 scripts/convert_hunyuandit_to_diffusers.py    |  6 +-
 scripts/convert_k_upscaler_to_diffusers.py    |  6 +-
 scripts/convert_mochi_to_diffusers.py         | 12 ++--
 scripts/convert_svd_to_diffusers.py           | 12 ++--
 scripts/convert_vq_diffusion_to_diffusers.py  | 24 +++----
 .../loaders/lora_conversion_utils.py          | 66 ++++++++---------
 .../models/transformers/transformer_2d.py    |  6 +-
 src/diffusers/pipelines/free_noise_utils.py  |  6 +-
 .../pipelines/omnigen/processor_omnigen.py    | 12 ++--
 src/diffusers/pipelines/shap_e/renderer.py    | 12 ++--
 src/diffusers/quantizers/base.py              | 12 ++--
 tests/models/test_modeling_common.py          | 12 ++--
 .../test_models_transformer_sd3.py            | 12 ++--
 .../unets/test_models_unet_2d_condition.py    | 36 +++++-----
 tests/others/test_image_processor.py          | 30 ++++----
 tests/pipelines/amused/test_amused.py         |  3 +-
 tests/pipelines/amused/test_amused_img2img.py |  3 +-
 tests/pipelines/amused/test_amused_inpaint.py |  3 +-
 .../aura_flow/test_pipeline_aura_flow.py      | 24 +++----
 .../blipdiffusion/test_blipdiffusion.py       |  6 +-
 tests/pipelines/cogvideo/test_cogvideox.py    | 24 +++----
 .../cogvideo/test_cogvideox_fun_control.py    | 24 +++----
 .../cogvideo/test_cogvideox_image2video.py    | 24 +++----
 .../cogvideo/test_cogvideox_video2video.py    | 24 +++----
 .../test_controlnet_blip_diffusion.py         |  6 +-
 .../controlnet_flux/test_controlnet_flux.py   |  6 +-
 .../test_controlnet_flux_img2img.py           | 24 +++----
 .../test_controlnet_hunyuandit.py             |  6 +-
 .../test_controlnet_inpaint_sd3.py            |  6 +-
 .../controlnet_sd3/test_controlnet_sd3.py     |  6 +-
 tests/pipelines/flux/test_pipeline_flux.py    | 24 +++----
 .../flux/test_pipeline_flux_control.py        | 24 +++----
 .../test_pipeline_flux_control_inpaint.py     | 24 +++----
 .../pipelines/hunyuandit/test_hunyuan_dit.py  | 24 +++----
 tests/pipelines/kandinsky/test_kandinsky.py   | 12 ++--
 .../kandinsky/test_kandinsky_combined.py      | 36 +++++-----
 .../kandinsky/test_kandinsky_img2img.py       | 12 ++--
 .../kandinsky/test_kandinsky_inpaint.py       | 12 ++--
 .../pipelines/kandinsky2_2/test_kandinsky.py  | 12 ++--
 .../kandinsky2_2/test_kandinsky_combined.py   | 36 +++++-----
 .../kandinsky2_2/test_kandinsky_controlnet.py | 12 ++--
 .../test_kandinsky_controlnet_img2img.py      | 12 ++--
 .../kandinsky2_2/test_kandinsky_img2img.py    | 12 ++--
 .../kandinsky2_2/test_kandinsky_inpaint.py    | 12 ++--
 tests/pipelines/kandinsky3/test_kandinsky3.py |  6 +-
 .../kandinsky3/test_kandinsky3_img2img.py     |  6 +-
 tests/pipelines/pag/test_pag_animatediff.py   |  6 +-
 tests/pipelines/pag/test_pag_controlnet_sd.py |  6 +-
 .../pag/test_pag_controlnet_sd_inpaint.py     |  6 +-
 .../pipelines/pag/test_pag_controlnet_sdxl.py |  6 +-
 .../pag/test_pag_controlnet_sdxl_img2img.py   |  6 +-
 tests/pipelines/pag/test_pag_hunyuan_dit.py   | 24 +++----
 tests/pipelines/pag/test_pag_kolors.py        |  6 +-
 tests/pipelines/pag/test_pag_pixart_sigma.py  |  6 +-
 tests/pipelines/pag/test_pag_sana.py          |  6 +-
 tests/pipelines/pag/test_pag_sd.py            | 18 ++---
 tests/pipelines/pag/test_pag_sd3.py           | 30 ++++----
 tests/pipelines/pag/test_pag_sd3_img2img.py   | 18 ++---
 tests/pipelines/pag/test_pag_sd_img2img.py    | 18 ++---
 tests/pipelines/pag/test_pag_sd_inpaint.py    | 12 ++--
 tests/pipelines/pag/test_pag_sdxl.py          | 18 ++---
 tests/pipelines/pag/test_pag_sdxl_img2img.py  | 18 ++---
 tests/pipelines/pag/test_pag_sdxl_inpaint.py  | 18 ++---
 tests/pipelines/pixart_sigma/test_pixart.py   | 24 +++----
 .../test_stable_cascade_combined.py           | 12 ++--
 .../stable_diffusion/test_stable_diffusion.py | 48 ++++++-------
 .../test_pipeline_stable_diffusion_3.py       | 24 +++----
 .../test_stable_diffusion_xl.py               | 30 ++++----
 .../test_stable_diffusion_xl_inpaint.py       | 12 ++--
 tests/pipelines/test_pipelines.py             | 24 +++----
 tests/pipelines/test_pipelines_common.py      | 72 +++++++++----------
 .../wuerstchen/test_wuerstchen_combined.py    | 12 ++--
 tests/schedulers/test_scheduler_dpm_multi.py  |  6 +-
 tests/schedulers/test_scheduler_dpm_single.py |  6 +-
 .../test_scheduler_edm_dpmsolver_multistep.py |  6 +-
 tests/schedulers/test_scheduler_euler.py      | 12 ++--
 tests/schedulers/test_scheduler_heun.py       |  6 +-
 .../single_file/single_file_testing_utils.py  | 24 +++----
 tests/single_file/test_lumina2_transformer.py |  6 +-
 .../test_model_autoencoder_dc_single_file.py  | 18 ++---
 .../test_model_controlnet_single_file.py      |  6 +-
 ...test_model_flux_transformer_single_file.py |  6 +-
 .../test_model_motion_adapter_single_file.py  | 24 +++----
 .../test_model_sd_cascade_unet_single_file.py | 24 +++----
 .../single_file/test_model_vae_single_file.py |  6 +-
 .../test_model_wan_autoencoder_single_file.py |  6 +-
 ...est_model_wan_transformer3d_single_file.py | 12 ++--
 tests/single_file/test_sana_transformer.py    |  6 +-
 105 files changed, 769 insertions(+), 776 deletions(-)

diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
index 82075280c329..dc774d145c83 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
@@ -839,9 +839,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
         idx = 0
         for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
             assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
-            assert all(
-                isinstance(tok, str) for tok in inserting_toks
-            ), "All elements in inserting_toks should be strings."
+            assert all(isinstance(tok, str) for tok in inserting_toks), (
+                "All elements in inserting_toks should be strings."
+            )

             self.inserting_toks = inserting_toks
             special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
index 32df4c4e16a8..95ba53391cf3 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
@@ -725,9 +725,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
         idx = 0
         for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
             assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
-            assert all(
-                isinstance(tok, str) for tok in inserting_toks
-            ), "All elements in inserting_toks should be strings."
+            assert all(isinstance(tok, str) for tok in inserting_toks), (
+                "All elements in inserting_toks should be strings."
+            )

             self.inserting_toks = inserting_toks
             special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
@@ -747,9 +747,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                 .to(dtype=self.dtype)
                 * std_token_embedding
             )
-            self.embeddings_settings[
-                f"original_embeddings_{idx}"
-            ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            self.embeddings_settings[f"original_embeddings_{idx}"] = (
+                text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            )
             self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding

             inu = torch.ones((len(tokenizer),), dtype=torch.bool)
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
index f1a7f0224011..236dc20d621c 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
@@ -890,9 +890,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
         idx = 0
         for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
             assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
-            assert all(
-                isinstance(tok, str) for tok in inserting_toks
-            ), "All elements in inserting_toks should be strings."
+            assert all(isinstance(tok, str) for tok in inserting_toks), (
+                "All elements in inserting_toks should be strings."
+            )

             self.inserting_toks = inserting_toks
             special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
@@ -912,9 +912,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                 .to(dtype=self.dtype)
                 * std_token_embedding
             )
-            self.embeddings_settings[
-                f"original_embeddings_{idx}"
-            ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            self.embeddings_settings[f"original_embeddings_{idx}"] = (
+                text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            )
             self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding

             inu = torch.ones((len(tokenizer),), dtype=torch.bool)
diff --git a/examples/community/pipeline_prompt2prompt.py b/examples/community/pipeline_prompt2prompt.py
index 736f00799eae..b9985542ccf7 100644
--- a/examples/community/pipeline_prompt2prompt.py
+++ b/examples/community/pipeline_prompt2prompt.py
@@ -907,12 +907,12 @@ def create_controller(

     # reweight
     if edit_type == "reweight":
-        assert (
-            equalizer_words is not None and equalizer_strengths is not None
-        ), "To use reweight edit, please specify equalizer_words and equalizer_strengths."
-        assert len(equalizer_words) == len(
-            equalizer_strengths
-        ), "equalizer_words and equalizer_strengths must be of same length."
+        assert equalizer_words is not None and equalizer_strengths is not None, (
+            "To use reweight edit, please specify equalizer_words and equalizer_strengths."
+        )
+        assert len(equalizer_words) == len(equalizer_strengths), (
+            "equalizer_words and equalizer_strengths must be of same length."
+        )
         equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer)
         return AttentionReweight(
             prompts,
diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py
index faefd980ee52..fa2959cf41a1 100644
--- a/examples/custom_diffusion/train_custom_diffusion.py
+++ b/examples/custom_diffusion/train_custom_diffusion.py
@@ -731,18 +731,18 @@ def main(args):
             if not class_images_dir.exists():
                 class_images_dir.mkdir(parents=True, exist_ok=True)
             if args.real_prior:
-                assert (
-                    class_images_dir / "images"
-                ).exists(), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
-                assert (
-                    len(list((class_images_dir / "images").iterdir())) == args.num_class_images
-                ), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
-                assert (
-                    class_images_dir / "caption.txt"
-                ).exists(), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
-                assert (
-                    class_images_dir / "images.txt"
-                ).exists(), f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
+                assert (class_images_dir / "images").exists(), (
+                    f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
+                )
+                assert len(list((class_images_dir / "images").iterdir())) == args.num_class_images, (
+                    f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
+                )
+                assert (class_images_dir / "caption.txt").exists(), (
+                    f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
+                )
+                assert (class_images_dir / "images.txt").exists(), (
+                    f'Please run: python retrieve.py --class_prompt "{concept["class_prompt"]}" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}'
+                )
                 concept["class_prompt"] = os.path.join(class_images_dir, "caption.txt")
                 concept["class_data_dir"] = os.path.join(class_images_dir, "images.txt")
             args.concepts_list[i] = concept
diff --git a/examples/flux-control/train_control_lora_flux.py b/examples/flux-control/train_control_lora_flux.py
index 269e1b4477a0..2a9bfd949cde 100644
--- a/examples/flux-control/train_control_lora_flux.py
+++ b/examples/flux-control/train_control_lora_flux.py
@@ -91,9 +91,9 @@ def log_validation(flux_transformer, args, accelerator, weight_dtype, step, is_f
         torch_dtype=weight_dtype,
     )
     pipeline.load_lora_weights(args.output_dir)
-    assert (
-        pipeline.transformer.config.in_channels == initial_channels * 2
-    ), f"{pipeline.transformer.config.in_channels=}"
+    assert pipeline.transformer.config.in_channels == initial_channels * 2, (
+        f"{pipeline.transformer.config.in_channels=}"
+    )
     pipeline.to(accelerator.device)
     pipeline.set_progress_bar_config(disable=True)

diff --git a/examples/model_search/pipeline_easy.py b/examples/model_search/pipeline_easy.py
index a8add8311006..b82e98fb71ff 100644
--- a/examples/model_search/pipeline_easy.py
+++ b/examples/model_search/pipeline_easy.py
@@ -1081,9 +1081,9 @@ def auto_load_textual_inversion(
                     f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}"
                 )

-                pretrained_model_name_or_paths[
-                    pretrained_model_name_or_paths.index(search_word)
-                ] = textual_inversion_path.model_path
+                pretrained_model_name_or_paths[pretrained_model_name_or_paths.index(search_word)] = (
+                    textual_inversion_path.model_path
+                )

         self.load_textual_inversion(
             pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs
diff --git a/examples/research_projects/anytext/anytext.py b/examples/research_projects/anytext/anytext.py
index 5c30b24efe88..2e96014c4193 100644
--- a/examples/research_projects/anytext/anytext.py
+++ b/examples/research_projects/anytext/anytext.py
@@ -187,9 +187,9 @@ def get_clip_token_for_string(tokenizer, string):
         return_tensors="pt",
     )
    tokens = batch_encoding["input_ids"]
-    assert (
-        torch.count_nonzero(tokens - 49407) == 2
-    ), f"String '{string}' maps to more than a single token. Please use another string"
+    assert torch.count_nonzero(tokens - 49407) == 2, (
+        f"String '{string}' maps to more than a single token. Please use another string"
+    )
     return tokens[0, 1]


diff --git a/examples/research_projects/anytext/ocr_recog/RecSVTR.py b/examples/research_projects/anytext/ocr_recog/RecSVTR.py
index 590a96995b26..3dc813b84a55 100644
--- a/examples/research_projects/anytext/ocr_recog/RecSVTR.py
+++ b/examples/research_projects/anytext/ocr_recog/RecSVTR.py
@@ -312,9 +312,9 @@ def __init__(self, img_size=(32, 100), in_channels=3, embed_dim=768, sub_num=2):

     def forward(self, x):
         B, C, H, W = x.shape
-        assert (
-            H == self.img_size[0] and W == self.img_size[1]
-        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+        assert H == self.img_size[0] and W == self.img_size[1], (
+            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+        )
         x = self.proj(x).flatten(2).permute(0, 2, 1)
         return x

diff --git a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
index 0f507b26d6a8..57c555e43fd8 100644
--- a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
+++ b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
@@ -763,9 +763,9 @@ def main(args):
     # Parse instance and class inputs, and double check that lengths match
     instance_data_dir = args.instance_data_dir.split(",")
     instance_prompt = args.instance_prompt.split(",")
-    assert all(
-        x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)]
-    ), "Instance data dir and prompt inputs are not of the same length."
+    assert all(x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)]), (
+        "Instance data dir and prompt inputs are not of the same length."
+    )

     if args.with_prior_preservation:
         class_data_dir = args.class_data_dir.split(",")
@@ -788,9 +788,9 @@ def main(args):
            negative_validation_prompts.append(None)
         args.validation_negative_prompt = negative_validation_prompts

-    assert num_of_validation_prompts == len(
-        negative_validation_prompts
-    ), "The length of negative prompts for validation is greater than the number of validation prompts."
+    assert num_of_validation_prompts == len(negative_validation_prompts), (
+        "The length of negative prompts for validation is greater than the number of validation prompts."
+    )

     args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts
     args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts
diff --git a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py
index 19432142f541..75dcfccbd5b8 100644
--- a/examples/research_projects/multi_token_textual_inversion/textual_inversion.py
+++ b/examples/research_projects/multi_token_textual_inversion/textual_inversion.py
@@ -830,9 +830,9 @@ def main():
                 # Let's make sure we don't update any embedding weights besides the newly added token
                 index_no_updates = get_mask(tokenizer, accelerator)
                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
-                        index_no_updates
-                    ] = orig_embeds_params[index_no_updates]
+                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
+                        orig_embeds_params[index_no_updates]
+                    )

             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:
diff --git a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py
index 7f5dc8ece9fc..a881b06a94dc 100644
--- a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py
+++ b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py
@@ -886,9 +886,9 @@ def main():
                 index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False

                 with torch.no_grad():
-                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
-                        index_no_updates
-                    ] = orig_embeds_params[index_no_updates]
+                    accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
+                        orig_embeds_params[index_no_updates]
+                    )

             # Checks if the
accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index 757a12045f10..51e220828cdf 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -910,9 +910,9 @@ def main(): index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ - index_no_updates - ] = orig_embeds_params[index_no_updates] + accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = ( + orig_embeds_params[index_no_updates] + ) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/textual_inversion/textual_inversion_sdxl.py b/examples/textual_inversion/textual_inversion_sdxl.py index 11463943c448..f32c729195b0 100644 --- a/examples/textual_inversion/textual_inversion_sdxl.py +++ b/examples/textual_inversion/textual_inversion_sdxl.py @@ -965,12 +965,12 @@ def main(): index_no_updates_2[min(placeholder_token_ids_2) : max(placeholder_token_ids_2) + 1] = False with torch.no_grad(): - accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[ - index_no_updates - ] = orig_embeds_params[index_no_updates] - accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[ - index_no_updates_2 - ] = orig_embeds_params_2[index_no_updates_2] + accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[index_no_updates] = ( + orig_embeds_params[index_no_updates] + ) + accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[index_no_updates_2] = ( + orig_embeds_params_2[index_no_updates_2] + ) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/examples/vqgan/train_vqgan.py b/examples/vqgan/train_vqgan.py index 992722fa7a78..33d234da52d7 100644 --- a/examples/vqgan/train_vqgan.py +++ b/examples/vqgan/train_vqgan.py @@ -653,15 +653,15 @@ def main(): try: # Gets the resolution of the timm transformation after centercrop timm_centercrop_transform = timm_transform.transforms[1] - assert isinstance( - timm_centercrop_transform, transforms.CenterCrop - ), f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." + assert isinstance(timm_centercrop_transform, transforms.CenterCrop), ( + f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." + ) timm_model_resolution = timm_centercrop_transform.size[0] # Gets final normalization timm_model_normalization = timm_transform.transforms[-1] - assert isinstance( - timm_model_normalization, transforms.Normalize - ), f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." + assert isinstance(timm_model_normalization, transforms.Normalize), ( + f"Timm model {timm_model} is currently incompatible with this script. Try vgg19." 
+ ) except AssertionError as e: raise NotImplementedError(e) # Enable flash attention if asked diff --git a/scripts/convert_dance_diffusion_to_diffusers.py b/scripts/convert_dance_diffusion_to_diffusers.py index f9caa50dfc9b..e269a49070cc 100755 --- a/scripts/convert_dance_diffusion_to_diffusers.py +++ b/scripts/convert_dance_diffusion_to_diffusers.py @@ -261,9 +261,9 @@ def main(args): model_name = args.model_path.split("/")[-1].split(".")[0] if not os.path.isfile(args.model_path): - assert ( - model_name == args.model_path - ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" + assert model_name == args.model_path, ( + f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" + ) args.model_path = download(model_name) sample_rate = MODELS_MAP[model_name]["sample_rate"] @@ -290,9 +290,9 @@ def main(args): assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}" for key, value in renamed_state_dict.items(): - assert ( - diffusers_state_dict[key].squeeze().shape == value.squeeze().shape - ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" + assert diffusers_state_dict[key].squeeze().shape == value.squeeze().shape, ( + f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" + ) if key == "time_proj.weight": value = value.squeeze() diff --git a/scripts/convert_hunyuandit_controlnet_to_diffusers.py b/scripts/convert_hunyuandit_controlnet_to_diffusers.py index 1c8383690890..5cef46c98983 100644 --- a/scripts/convert_hunyuandit_controlnet_to_diffusers.py +++ b/scripts/convert_hunyuandit_controlnet_to_diffusers.py @@ -21,9 +21,9 @@ def main(args): model_config = HunyuanDiT2DControlNetModel.load_config( "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer" ) - model_config[ - "use_style_cond_and_image_meta_size" - ] = args.use_style_cond_and_image_meta_size ### version <= v1.1: True; version >= v1.2: False + model_config["use_style_cond_and_image_meta_size"] = ( + args.use_style_cond_and_image_meta_size + ) ### version <= v1.1: True; version >= v1.2: False print(model_config) for key in state_dict: diff --git a/scripts/convert_hunyuandit_to_diffusers.py b/scripts/convert_hunyuandit_to_diffusers.py index ef174098a815..65fcccb22a1a 100644 --- a/scripts/convert_hunyuandit_to_diffusers.py +++ b/scripts/convert_hunyuandit_to_diffusers.py @@ -18,9 +18,9 @@ def main(args): device = "cuda" model_config = HunyuanDiT2DModel.load_config("Tencent-Hunyuan/HunyuanDiT-Diffusers", subfolder="transformer") - model_config[ - "use_style_cond_and_image_meta_size" - ] = args.use_style_cond_and_image_meta_size ### version <= v1.1: True; version >= v1.2: False + model_config["use_style_cond_and_image_meta_size"] = ( + args.use_style_cond_and_image_meta_size + ) ### version <= v1.1: True; version >= v1.2: False # input_size -> sample_size, text_dim -> cross_attention_dim for key in state_dict: diff --git a/scripts/convert_k_upscaler_to_diffusers.py b/scripts/convert_k_upscaler_to_diffusers.py index 76d374e565d7..cff845ef8099 100644 --- a/scripts/convert_k_upscaler_to_diffusers.py +++ b/scripts/convert_k_upscaler_to_diffusers.py @@ -220,9 +220,9 @@ def unet_model_from_original_config(original_config): block_out_channels = original_config["channels"] - assert ( - len(set(original_config["depths"])) == 1 - ), "UNet2DConditionModel currently do not support blocks with different number of layers" + assert 
len(set(original_config["depths"])) == 1, ( + "UNet2DConditionModel currently do not support blocks with different number of layers" + ) layers_per_block = original_config["depths"][0] class_labels_dim = original_config["mapping_cond_dim"] diff --git a/scripts/convert_mochi_to_diffusers.py b/scripts/convert_mochi_to_diffusers.py index 642045853591..64e4f69eac17 100644 --- a/scripts/convert_mochi_to_diffusers.py +++ b/scripts/convert_mochi_to_diffusers.py @@ -305,9 +305,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa for i in range(down_block_layers[block]): # Convert resnets - new_state_dict[ - f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight" - ] = encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.0.weight") + new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"] = ( + encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.0.weight") + ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.0.bias" ) @@ -317,9 +317,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.2.bias" ) - new_state_dict[ - f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight" - ] = encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.3.weight") + new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"] = ( + encoder_state_dict.pop(f"layers.{block + 4}.layers.{i + 1}.stack.3.weight") + ) new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop( f"layers.{block + 4}.layers.{i + 1}.stack.3.bias" ) diff --git a/scripts/convert_svd_to_diffusers.py b/scripts/convert_svd_to_diffusers.py index 3243ce294b26..e46410ccb3bd 100644 --- a/scripts/convert_svd_to_diffusers.py +++ b/scripts/convert_svd_to_diffusers.py @@ -381,9 +381,9 @@ def convert_ldm_unet_checkpoint( # TODO resnet time_mixer.mix_factor if f"input_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: - new_checkpoint[ - f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" - ] = unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"] + new_checkpoint[f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = ( + unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"] + ) if len(attentions): paths = renew_attention_paths(attentions) @@ -478,9 +478,9 @@ def convert_ldm_unet_checkpoint( ) if f"output_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: - new_checkpoint[ - f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" - ] = unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"] + new_checkpoint[f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = ( + unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"] + ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): diff --git a/scripts/convert_vq_diffusion_to_diffusers.py b/scripts/convert_vq_diffusion_to_diffusers.py index 7da6b4094986..fe62d18faff0 100644 --- a/scripts/convert_vq_diffusion_to_diffusers.py +++ b/scripts/convert_vq_diffusion_to_diffusers.py @@ -51,9 +51,9 @@ def vqvae_model_from_original_config(original_config): - assert ( - 
original_config["target"] in PORTED_VQVAES - ), f"{original_config['target']} has not yet been ported to diffusers." + assert original_config["target"] in PORTED_VQVAES, ( + f"{original_config['target']} has not yet been ported to diffusers." + ) original_config = original_config["params"] @@ -464,15 +464,15 @@ def vqvae_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_p def transformer_model_from_original_config( original_diffusion_config, original_transformer_config, original_content_embedding_config ): - assert ( - original_diffusion_config["target"] in PORTED_DIFFUSIONS - ), f"{original_diffusion_config['target']} has not yet been ported to diffusers." - assert ( - original_transformer_config["target"] in PORTED_TRANSFORMERS - ), f"{original_transformer_config['target']} has not yet been ported to diffusers." - assert ( - original_content_embedding_config["target"] in PORTED_CONTENT_EMBEDDINGS - ), f"{original_content_embedding_config['target']} has not yet been ported to diffusers." + assert original_diffusion_config["target"] in PORTED_DIFFUSIONS, ( + f"{original_diffusion_config['target']} has not yet been ported to diffusers." + ) + assert original_transformer_config["target"] in PORTED_TRANSFORMERS, ( + f"{original_transformer_config['target']} has not yet been ported to diffusers." + ) + assert original_content_embedding_config["target"] in PORTED_CONTENT_EMBEDDINGS, ( + f"{original_content_embedding_config['target']} has not yet been ported to diffusers." + ) original_diffusion_config = original_diffusion_config["params"] original_transformer_config = original_transformer_config["params"] diff --git a/src/diffusers/loaders/lora_conversion_utils.py b/src/diffusers/loaders/lora_conversion_utils.py index 5ec16ff299eb..791b7ae9b14f 100644 --- a/src/diffusers/loaders/lora_conversion_utils.py +++ b/src/diffusers/loaders/lora_conversion_utils.py @@ -184,9 +184,9 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_ # Store DoRA scale if present. if dora_present_in_unet: dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down." - unet_state_dict[ - diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.") - ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = ( + state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + ) # Handle text encoder LoRAs. elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")): @@ -206,13 +206,13 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_ "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer." 
) if lora_name.startswith(("lora_te_", "lora_te1_")): - te_state_dict[ - diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") - ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = ( + state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + ) elif lora_name.startswith("lora_te2_"): - te2_state_dict[ - diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") - ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = ( + state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + ) # Store alpha if present. if lora_name_alpha in state_dict: @@ -1020,21 +1020,21 @@ def _convert_bfl_flux_control_lora_to_diffusers(original_state_dict): for lora_key in ["lora_A", "lora_B"]: ## time_text_embed.timestep_embedder <- time_in - converted_state_dict[ - f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight" - ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight") + converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"] = ( + original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight") + ) if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[ - f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias" - ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias") + converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"] = ( + original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias") + ) - converted_state_dict[ - f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight" - ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight") + converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"] = ( + original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight") + ) if f"time_in.out_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[ - f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias" - ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias") + converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"] = ( + original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias") + ) ## time_text_embed.text_embedder <- vector_in converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.weight"] = original_state_dict.pop( @@ -1056,21 +1056,21 @@ def _convert_bfl_flux_control_lora_to_diffusers(original_state_dict): # guidance has_guidance = any("guidance" in k for k in original_state_dict) if has_guidance: - converted_state_dict[ - f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight" - ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight") + converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"] = ( + original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight") + ) if f"guidance_in.in_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[ - f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias" - ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias") + converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"] = ( + original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias") + ) - converted_state_dict[ - 
f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight" - ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight") + converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"] = ( + original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight") + ) if f"guidance_in.out_layer.{lora_key}.bias" in original_state_dict_keys: - converted_state_dict[ - f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias" - ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias") + converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"] = ( + original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias") + ) # context_embedder converted_state_dict[f"context_embedder.{lora_key}.weight"] = original_state_dict.pop( diff --git a/src/diffusers/models/transformers/transformer_2d.py b/src/diffusers/models/transformers/transformer_2d.py index a88ee6c9c9b8..5515a7885098 100644 --- a/src/diffusers/models/transformers/transformer_2d.py +++ b/src/diffusers/models/transformers/transformer_2d.py @@ -211,9 +211,9 @@ def _init_continuous_input(self, norm_type): def _init_vectorized_inputs(self, norm_type): assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" - assert ( - self.config.num_vector_embeds is not None - ), "Transformer2DModel over discrete input must provide num_embed" + assert self.config.num_vector_embeds is not None, ( + "Transformer2DModel over discrete input must provide num_embed" + ) self.height = self.config.sample_size self.width = self.config.sample_size diff --git a/src/diffusers/pipelines/free_noise_utils.py b/src/diffusers/pipelines/free_noise_utils.py index dc0071a494e3..8ea5eb7dd575 100644 --- a/src/diffusers/pipelines/free_noise_utils.py +++ b/src/diffusers/pipelines/free_noise_utils.py @@ -341,9 +341,9 @@ def _encode_prompt_free_noise( start_tensor = negative_prompt_embeds[i].unsqueeze(0) end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0) - negative_prompt_interpolation_embeds[ - start_frame : end_frame + 1 - ] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) + negative_prompt_interpolation_embeds[start_frame : end_frame + 1] = ( + self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) + ) prompt_embeds = prompt_interpolation_embeds negative_prompt_embeds = negative_prompt_interpolation_embeds diff --git a/src/diffusers/pipelines/omnigen/processor_omnigen.py b/src/diffusers/pipelines/omnigen/processor_omnigen.py index 75d272ac5140..40fac01f8f8a 100644 --- a/src/diffusers/pipelines/omnigen/processor_omnigen.py +++ b/src/diffusers/pipelines/omnigen/processor_omnigen.py @@ -95,13 +95,13 @@ def process_multi_modal_prompt(self, text, input_images): image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags] unique_image_ids = sorted(set(image_ids)) - assert unique_image_ids == list( - range(1, len(unique_image_ids) + 1) - ), f"image_ids must start from 1, and must be continuous int, e.g. [1, 2, 3], cannot be {unique_image_ids}" + assert unique_image_ids == list(range(1, len(unique_image_ids) + 1)), ( + f"image_ids must start from 1, and must be continuous int, e.g. 
[1, 2, 3], cannot be {unique_image_ids}" + ) # total images must be the same as the number of image tags - assert ( - len(unique_image_ids) == len(input_images) - ), f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" + assert len(unique_image_ids) == len(input_images), ( + f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" + ) input_images = [input_images[x - 1] for x in image_ids] diff --git a/src/diffusers/pipelines/shap_e/renderer.py b/src/diffusers/pipelines/shap_e/renderer.py index 9d9f9d9b2ab1..dd25945590cd 100644 --- a/src/diffusers/pipelines/shap_e/renderer.py +++ b/src/diffusers/pipelines/shap_e/renderer.py @@ -983,9 +983,9 @@ def decode_to_mesh( fields = torch.cat(fields, dim=1) fields = fields.float() - assert ( - len(fields.shape) == 3 and fields.shape[-1] == 1 - ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" + assert len(fields.shape) == 3 and fields.shape[-1] == 1, ( + f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" + ) fields = fields.reshape(1, *([grid_size] * 3)) @@ -1039,9 +1039,9 @@ def decode_to_mesh( textures = textures.float() # 3.3 augument the mesh with texture data - assert len(textures.shape) == 3 and textures.shape[-1] == len( - texture_channels - ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" + assert len(textures.shape) == 3 and textures.shape[-1] == len(texture_channels), ( + f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" + ) for m, texture in zip(raw_meshes, textures): texture = texture[: len(m.verts)] diff --git a/src/diffusers/quantizers/base.py b/src/diffusers/quantizers/base.py index 1c75b5bef933..fa9ba98e6d0d 100644 --- a/src/diffusers/quantizers/base.py +++ b/src/diffusers/quantizers/base.py @@ -215,19 +215,15 @@ def _dequantize(self, model): ) @abstractmethod - def _process_model_before_weight_loading(self, model, **kwargs): - ... + def _process_model_before_weight_loading(self, model, **kwargs): ... @abstractmethod - def _process_model_after_weight_loading(self, model, **kwargs): - ... + def _process_model_after_weight_loading(self, model, **kwargs): ... @property @abstractmethod - def is_serializable(self): - ... + def is_serializable(self): ... @property @abstractmethod - def is_trainable(self): - ... + def is_trainable(self): ... diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index 6155ac2e39fd..f82a2407f333 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -299,9 +299,9 @@ def test_one_request_upon_cached(self): ) download_requests = [r.method for r in m.request_history] - assert ( - download_requests.count("HEAD") == 3 - ), "3 HEAD requests one for config, one for model, and one for shard index file." + assert download_requests.count("HEAD") == 3, ( + "3 HEAD requests one for config, one for model, and one for shard index file." 
+ ) assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: @@ -313,9 +313,9 @@ def test_one_request_upon_cached(self): ) cache_requests = [r.method for r in m.request_history] - assert ( - "HEAD" == cache_requests[0] and len(cache_requests) == 2 - ), "We should call only `model_info` to check for commit hash and knowing if shard index is present." + assert "HEAD" == cache_requests[0] and len(cache_requests) == 2, ( + "We should call only `model_info` to check for commit hash and knowing if shard index is present." + ) def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: diff --git a/tests/models/transformers/test_models_transformer_sd3.py b/tests/models/transformers/test_models_transformer_sd3.py index 659d9a82fd76..bfef1fc4f09b 100644 --- a/tests/models/transformers/test_models_transformer_sd3.py +++ b/tests/models/transformers/test_models_transformer_sd3.py @@ -92,9 +92,9 @@ def test_xformers_enable_works(self): model.enable_xformers_memory_efficient_attention() - assert ( - model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor" - ), "xformers is not enabled" + assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( + "xformers is not enabled" + ) @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. This test doesn't apply") def test_set_attn_processor_for_determinism(self): @@ -167,9 +167,9 @@ def test_xformers_enable_works(self): model.enable_xformers_memory_efficient_attention() - assert ( - model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor" - ), "xformers is not enabled" + assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( + "xformers is not enabled" + ) @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. 
This test doesn't apply") def test_set_attn_processor_for_determinism(self): diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py index 8e1187f11468..d01a0b493520 100644 --- a/tests/models/unets/test_models_unet_2d_condition.py +++ b/tests/models/unets/test_models_unet_2d_condition.py @@ -654,22 +654,22 @@ def test_model_xattn_mask(self, mask_dtype): keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample - assert full_cond_keepallmask_out.allclose( - full_cond_out, rtol=1e-05, atol=1e-05 - ), "a 'keep all' mask should give the same result as no mask" + assert full_cond_keepallmask_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( + "a 'keep all' mask should give the same result as no mask" + ) trunc_cond = cond[:, :-1, :] trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample - assert not trunc_cond_out.allclose( - full_cond_out, rtol=1e-05, atol=1e-05 - ), "discarding the last token from our cond should change the result" + assert not trunc_cond_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( + "discarding the last token from our cond should change the result" + ) batch, tokens, _ = cond.shape mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample - assert masked_cond_out.allclose( - trunc_cond_out, rtol=1e-05, atol=1e-05 - ), "masking the last token from our cond should be equivalent to truncating that token out of the condition" + assert masked_cond_out.allclose(trunc_cond_out, rtol=1e-05, atol=1e-05), ( + "masking the last token from our cond should be equivalent to truncating that token out of the condition" + ) # see diffusers.models.attention_processor::Attention#prepare_attention_mask # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. @@ -697,9 +697,9 @@ def test_model_xattn_padding(self): trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample - assert trunc_mask_out.allclose( - keeplast_out - ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." + assert trunc_mask_out.allclose(keeplast_out), ( + "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." + ) def test_custom_diffusion_processors(self): # enable deterministic behavior for gradient checkpointing @@ -1114,12 +1114,12 @@ def test_load_attn_procs_raise_warning(self): with torch.no_grad(): lora_sample_2 = model(**inputs_dict).sample - assert not torch.allclose( - non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4 - ), "LoRA injected UNet should produce different results." - assert torch.allclose( - lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4 - ), "Loading from a saved checkpoint should produce identical results." + assert not torch.allclose(non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4), ( + "LoRA injected UNet should produce different results." 
+ ) + assert torch.allclose(lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4), ( + "Loading from a saved checkpoint should produce identical results." + ) @require_peft_backend def test_save_attn_procs_raise_warning(self): diff --git a/tests/others/test_image_processor.py b/tests/others/test_image_processor.py index 3397ca9e394a..071194c59ead 100644 --- a/tests/others/test_image_processor.py +++ b/tests/others/test_image_processor.py @@ -65,9 +65,9 @@ def test_vae_image_processor_pt(self): ) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np - assert ( - np.abs(in_np - out_np).max() < 1e-6 - ), f"decoded output does not match input for output_type {output_type}" + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) def test_vae_image_processor_np(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) @@ -78,9 +78,9 @@ def test_vae_image_processor_np(self): out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np - assert ( - np.abs(in_np - out_np).max() < 1e-6 - ), f"decoded output does not match input for output_type {output_type}" + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) def test_vae_image_processor_pil(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) @@ -93,9 +93,9 @@ def test_vae_image_processor_pil(self): for i, o in zip(input_pil, out): in_np = np.array(i) out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round() - assert ( - np.abs(in_np - out_np).max() < 1e-6 - ), f"decoded output does not match input for output_type {output_type}" + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) def test_preprocess_input_3d(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) @@ -293,9 +293,9 @@ def test_vae_image_processor_resize_pt(self): scale = 2 out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale) exp_pt_shape = (b, c, h // scale, w // scale) - assert ( - out_pt.shape == exp_pt_shape - ), f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." + assert out_pt.shape == exp_pt_shape, ( + f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." + ) def test_vae_image_processor_resize_np(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) @@ -305,6 +305,6 @@ def test_vae_image_processor_resize_np(self): input_np = self.to_np(input_pt) out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale) exp_np_shape = (b, h // scale, w // scale, c) - assert ( - out_np.shape == exp_np_shape - ), f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." + assert out_np.shape == exp_np_shape, ( + f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." 
+ ) diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py index a0fbc5df1c28..ac579bbf2be2 100644 --- a/tests/pipelines/amused/test_amused.py +++ b/tests/pipelines/amused/test_amused.py @@ -126,8 +126,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... @slow diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py index 2699bbe7f56f..942735f15707 100644 --- a/tests/pipelines/amused/test_amused_img2img.py +++ b/tests/pipelines/amused/test_amused_img2img.py @@ -126,8 +126,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... @slow diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py index 645379a7eab1..541b988f1798 100644 --- a/tests/pipelines/amused/test_amused_inpaint.py +++ b/tests/pipelines/amused/test_amused_inpaint.py @@ -130,8 +130,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... @slow diff --git a/tests/pipelines/aura_flow/test_pipeline_aura_flow.py b/tests/pipelines/aura_flow/test_pipeline_aura_flow.py index c56aeb905ac3..1eb9d1035c33 100644 --- a/tests/pipelines/aura_flow/test_pipeline_aura_flow.py +++ b/tests/pipelines/aura_flow/test_pipeline_aura_flow.py @@ -106,9 +106,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -122,15 +122,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." 
+ assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) @unittest.skip("xformers attention processor does not exist for AuraFlow") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/blipdiffusion/test_blipdiffusion.py b/tests/pipelines/blipdiffusion/test_blipdiffusion.py index e073f55aec9e..db8d36b23a4b 100644 --- a/tests/pipelines/blipdiffusion/test_blipdiffusion.py +++ b/tests/pipelines/blipdiffusion/test_blipdiffusion.py @@ -195,9 +195,9 @@ def test_blipdiffusion(self): [0.5329548, 0.8372512, 0.33269387, 0.82096875, 0.43657133, 0.3783, 0.5953028, 0.51934963, 0.42142007] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {image_slice.flatten()}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {image_slice.flatten()}, but got {image_slice.flatten()}" + ) @unittest.skip("Test not supported because of complexities in deriving query_embeds.") def test_encode_prompt_works_in_isolation(self): diff --git a/tests/pipelines/cogvideo/test_cogvideox.py b/tests/pipelines/cogvideo/test_cogvideox.py index 388dc9ef7ec4..a9de0ff05fe8 100644 --- a/tests/pipelines/cogvideo/test_cogvideox.py +++ b/tests/pipelines/cogvideo/test_cogvideox.py @@ -299,9 +299,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -315,15 +315,15 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." 
+ ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) @slow diff --git a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py index 2e962bd247b9..4f32da7ac4ae 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_fun_control.py +++ b/tests/pipelines/cogvideo/test_cogvideox_fun_control.py @@ -299,9 +299,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -315,12 +315,12 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) diff --git a/tests/pipelines/cogvideo/test_cogvideox_image2video.py b/tests/pipelines/cogvideo/test_cogvideox_image2video.py index cac47f1a83d4..ec4e51bd1bad 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_image2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_image2video.py @@ -317,9 +317,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." 
@@ -333,15 +333,15 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) @slow diff --git a/tests/pipelines/cogvideo/test_cogvideox_video2video.py b/tests/pipelines/cogvideo/test_cogvideox_video2video.py index 4d836cb5e2a4..b1ac8cbd90ed 100644 --- a/tests/pipelines/cogvideo/test_cogvideox_video2video.py +++ b/tests/pipelines/cogvideo/test_cogvideox_video2video.py @@ -298,9 +298,9 @@ def test_fused_qkv_projections(self): original_image_slice = frames[0, -2:, -1, -3:, -3:] pipe.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -314,12 +314,12 @@ def test_fused_qkv_projections(self): frames = pipe(**inputs).frames image_slice_disabled = frames[0, -2:, -1, -3:, -3:] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) diff --git a/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py b/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py index eedda4e21722..a5768cb51fbf 100644 --- a/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py +++ b/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py @@ -219,9 +219,9 @@ def test_blipdiffusion_controlnet(self): assert image.shape == (1, 16, 16, 4) expected_slice = np.array([0.7953, 0.7136, 0.6597, 0.4779, 0.7389, 0.4111, 0.5826, 0.4150, 0.8422]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) @unittest.skip("Test not supported because of complexities in deriving query_embeds.") def test_encode_prompt_works_in_isolation(self): diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux.py b/tests/pipelines/controlnet_flux/test_controlnet_flux.py index 9a270c2bbf07..9ce62cde9fe4 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux.py @@ -178,9 +178,9 @@ def test_controlnet_flux(self): [0.47387695, 0.63134766, 0.5605469, 0.61621094, 0.7207031, 0.7089844, 0.70410156, 0.6113281, 0.64160156] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py index 59ccb9237819..8d63619c402b 100644 --- a/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py +++ b/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): original_image_slice = image[0, -3:, -3:, -1] pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." 
+ assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py index f7b3db05c8af..4bd7f59dc0a8 100644 --- a/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py +++ b/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py @@ -162,9 +162,9 @@ def test_controlnet_hunyuandit(self): [0.6953125, 0.89208984, 0.59375, 0.5078125, 0.5786133, 0.6035156, 0.5839844, 0.53564453, 0.52246094] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py index 2cd57ce56d52..d9f5dcad7d61 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py @@ -194,9 +194,9 @@ def test_controlnet_inpaint_sd3(self): [0.51708984, 0.7421875, 0.4580078, 0.6435547, 0.65625, 0.43603516, 0.5151367, 0.65722656, 0.60839844] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") def test_xformers_attention_forwardGenerator_pass(self): diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py index 84ce09acbe1a..1be15645efd7 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py @@ -202,9 +202,9 @@ def run_pipe(self, components, use_sd35=False): else: expected_slice = np.array([1.0000, 0.9072, 0.4209, 0.2744, 0.5737, 0.3840, 0.6113, 0.6250, 0.6328]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f"Expected: {expected_slice}, got: {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) def test_controlnet_sd3(self): components = self.get_dummy_components() diff --git a/tests/pipelines/flux/test_pipeline_flux.py b/tests/pipelines/flux/test_pipeline_flux.py index 6a560367a5b8..646ad928ec05 100644 --- a/tests/pipelines/flux/test_pipeline_flux.py +++ b/tests/pipelines/flux/test_pipeline_flux.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once 
`fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/flux/test_pipeline_flux_control.py b/tests/pipelines/flux/test_pipeline_flux_control.py index d8293952adcb..d8d0774e1e32 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control.py +++ b/tests/pipelines/flux/test_pipeline_flux_control.py @@ -140,9 +140,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -156,15 +156,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." 
- assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py index 44ce2a4dedfc..a2f7c9171082 100644 --- a/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py +++ b/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py @@ -134,9 +134,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -150,15 +150,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) diff --git a/tests/pipelines/hunyuandit/test_hunyuan_dit.py b/tests/pipelines/hunyuandit/test_hunyuan_dit.py index 5c3a2cbea7ba..66453b73b0b3 100644 --- a/tests/pipelines/hunyuandit/test_hunyuan_dit.py +++ b/tests/pipelines/hunyuandit/test_hunyuan_dit.py @@ -174,9 +174,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -192,15 +192,15 @@ def test_fused_qkv_projections(self): image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) @unittest.skip( "Test not supported as `encode_prompt` is called two times separately which deivates from about 99% of the pipelines we have." 
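The hunks above and below all apply the same mechanical rewrite: the assert condition, previously parenthesized and split across several lines, is kept on a single line, and only the failure message is wrapped in parentheses. The two layouts are semantically identical. As a minimal sketch of the pattern (the variable names mirror the tests; the array values are placeholders, not taken from any test):

import numpy as np

image_slice = np.zeros(9)  # placeholder data for illustration only
expected_slice = np.zeros(9)

# Old layout: the condition is parenthesized and wrapped across lines.
assert (
    np.abs(image_slice - expected_slice).max() < 1e-2
), f"expected_slice {expected_slice}, but got {image_slice}"

# New layout: the condition stays on one line; only the message is
# parenthesized. The parentheses must wrap the message alone:
# `assert (condition, "message")` would build a two-element tuple,
# which is always truthy, so such an assert could never fail.
assert np.abs(image_slice - expected_slice).max() < 1e-2, (
    f"expected_slice {expected_slice}, but got {image_slice}"
)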
diff --git a/tests/pipelines/kandinsky/test_kandinsky.py b/tests/pipelines/kandinsky/test_kandinsky.py index 30144e37a9d4..f4de6f3a5338 100644 --- a/tests/pipelines/kandinsky/test_kandinsky.py +++ b/tests/pipelines/kandinsky/test_kandinsky.py @@ -240,12 +240,12 @@ def test_kandinsky(self): expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py index c5f27a9cc9a9..f14a741d7dc1 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -98,12 +98,12 @@ def test_kandinsky(self): expected_slice = np.array([0.2893, 0.1464, 0.4603, 0.3529, 0.4612, 0.7701, 0.4027, 0.3051, 0.5155]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): @@ -206,12 +206,12 @@ def test_kandinsky(self): expected_slice = np.array([0.4852, 0.4136, 0.4539, 0.4781, 0.4680, 0.5217, 0.4973, 0.4089, 0.4977]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): @@ -318,12 +318,12 @@ def test_kandinsky(self): expected_slice = np.array([0.0320, 0.0860, 0.4013, 0.0518, 0.2484, 0.5847, 0.4411, 0.2321, 0.4593]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + 
f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/tests/pipelines/kandinsky/test_kandinsky_img2img.py index fee29fb150b7..169709978042 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky/test_kandinsky_img2img.py @@ -261,12 +261,12 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py index 79faa95984a0..d4d5c4e48f78 100644 --- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ -256,12 +256,12 @@ def test_kandinsky_inpaint(self): expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky.py b/tests/pipelines/kandinsky2_2/test_kandinsky.py index fea49d47b7bb..aa17f6fc5d6b 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky.py @@ -210,13 +210,13 @@ def test_kandinsky(self): expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_from_tuple_slice.flatten() - 
expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py index 90f8b2034109..17ef3dc2601e 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py @@ -103,12 +103,12 @@ def test_kandinsky(self): expected_slice = np.array([0.3076, 0.2729, 0.5668, 0.0522, 0.3384, 0.7028, 0.4908, 0.3659, 0.6243]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): @@ -227,12 +227,12 @@ def test_kandinsky(self): expected_slice = np.array([0.4445, 0.4287, 0.4596, 0.3919, 0.3730, 0.5039, 0.4834, 0.4269, 0.5521]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): @@ -350,12 +350,12 @@ def test_kandinsky(self): expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py index 1f3219e0d69e..10a95d6177b2 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py @@ -210,13 +210,13 @@ def test_kandinsky_controlnet(self): [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice 
{expected_slice}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py index 8613f5acf045..58fbbecc0569 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py @@ -218,12 +218,12 @@ def test_kandinsky_controlnet_img2img(self): expected_slice = np.array( [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1.75e-3) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py index e0d90071f111..aa7589a212eb 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py @@ -228,12 +228,12 @@ def test_kandinsky_img2img(self): assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py index c9c5f4155828..d7ac69820761 100644 --- a/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py +++ b/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py @@ -234,12 +234,12 @@ def test_kandinsky_inpaint(self): [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 
0.48327848] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) diff --git a/tests/pipelines/kandinsky3/test_kandinsky3.py b/tests/pipelines/kandinsky3/test_kandinsky3.py index af1d45ff8975..c54b91f024af 100644 --- a/tests/pipelines/kandinsky3/test_kandinsky3.py +++ b/tests/pipelines/kandinsky3/test_kandinsky3.py @@ -157,9 +157,9 @@ def test_kandinsky3(self): expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py index e00948621a06..088c32e2860e 100644 --- a/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py +++ b/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py @@ -181,9 +181,9 @@ def test_kandinsky3_img2img(self): [0.576259, 0.6132097, 0.41703486, 0.603196, 0.62062526, 0.4655338, 0.5434324, 0.5660727, 0.65433365] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py index 6fa96275406f..b9ce29c70bdf 100644 --- a/tests/pipelines/pag/test_pag_animatediff.py +++ b/tests/pipelines/pag/test_pag_animatediff.py @@ -450,9 +450,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) out = pipe_sd(**inputs).frames[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_controlnet_sd.py b/tests/pipelines/pag/test_pag_controlnet_sd.py index ee97b0507a34..02232c7379bd 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sd.py +++ b/tests/pipelines/pag/test_pag_controlnet_sd.py @@ -169,9 +169,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py index 25ef5d253d68..cfc0b218d2e4 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py +++ b/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py @@ -165,9 +165,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__calss__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl.py b/tests/pipelines/pag/test_pag_controlnet_sdxl.py index 0588e26286a8..10adff7fe0a6 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sdxl.py +++ b/tests/pipelines/pag/test_pag_controlnet_sdxl.py @@ -187,9 +187,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py index 63c7d9fbee2d..fe4b615f646b 100644 --- a/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py +++ b/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py @@ -189,9 +189,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
+ ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_hunyuan_dit.py b/tests/pipelines/pag/test_pag_hunyuan_dit.py index 31cd9aa666de..d6cfbbed9e95 100644 --- a/tests/pipelines/pag/test_pag_hunyuan_dit.py +++ b/tests/pipelines/pag/test_pag_hunyuan_dit.py @@ -177,15 +177,15 @@ def test_fused_qkv_projections(self): image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -198,9 +198,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_kolors.py b/tests/pipelines/pag/test_pag_kolors.py index 9a4f1daa2c05..c9f197b703ef 100644 --- a/tests/pipelines/pag/test_pag_kolors.py +++ b/tests/pipelines/pag/test_pag_kolors.py @@ -140,9 +140,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_pixart_sigma.py b/tests/pipelines/pag/test_pag_pixart_sigma.py index 63f42416dbca..624b57844390 100644 --- a/tests/pipelines/pag/test_pag_pixart_sigma.py +++ b/tests/pipelines/pag/test_pag_pixart_sigma.py @@ -120,9 +120,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe.__class__.__name__}." 
+ assert "pag_scale" not in inspect.signature(pipe.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe.__class__.__name__}." + ) out = pipe(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 diff --git a/tests/pipelines/pag/test_pag_sana.py b/tests/pipelines/pag/test_pag_sana.py index a2c657297860..ee1e359383e9 100644 --- a/tests/pipelines/pag/test_pag_sana.py +++ b/tests/pipelines/pag/test_pag_sana.py @@ -268,9 +268,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_sd.py b/tests/pipelines/pag/test_pag_sd.py index d4cf00b034ff..bc20226873f6 100644 --- a/tests/pipelines/pag/test_pag_sd.py +++ b/tests/pipelines/pag/test_pag_sd.py @@ -154,9 +154,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -328,9 +328,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -345,6 +345,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sd3.py b/tests/pipelines/pag/test_pag_sd3.py index 41ff0c3c09f4..737e238e5fbf 100644 --- a/tests/pipelines/pag/test_pag_sd3.py +++ b/tests/pipelines/pag/test_pag_sd3.py @@ -170,9 +170,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." 
+ assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -186,15 +186,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -207,9 +207,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() diff --git a/tests/pipelines/pag/test_pag_sd3_img2img.py b/tests/pipelines/pag/test_pag_sd3_img2img.py index 2fe988929185..fe593d47dc75 100644 --- a/tests/pipelines/pag/test_pag_sd3_img2img.py +++ b/tests/pipelines/pag/test_pag_sd3_img2img.py @@ -149,9 +149,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() @@ -254,9 +254,9 @@ def test_pag_cfg(self): 0.17822266, ] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained( @@ -272,6 +272,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.1508789, 0.16210938, 0.17138672, 0.16210938, 0.17089844, 0.16137695, 0.16235352, 0.16430664, 0.16455078] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sd_img2img.py b/tests/pipelines/pag/test_pag_sd_img2img.py index d000493d6bd1..ef70985571c9 100644 --- a/tests/pipelines/pag/test_pag_sd_img2img.py +++ b/tests/pipelines/pag/test_pag_sd_img2img.py @@ -161,9 +161,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -267,9 +267,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -285,6 +285,6 @@ def test_pag_uncond(self): [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sd_inpaint.py b/tests/pipelines/pag/test_pag_sd_inpaint.py index 06682c111d37..04ec8b216551 100644 --- a/tests/pipelines/pag/test_pag_sd_inpaint.py +++ b/tests/pipelines/pag/test_pag_sd_inpaint.py @@ -302,9 +302,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = 
AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -319,6 +319,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.3876953, 0.40356445, 0.4934082, 0.39697266, 0.41674805, 0.41015625, 0.375, 0.36914062, 0.40649414] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sdxl.py b/tests/pipelines/pag/test_pag_sdxl.py index b35b2b1d2f7e..fc4ce1067f76 100644 --- a/tests/pipelines/pag/test_pag_sdxl.py +++ b/tests/pipelines/pag/test_pag_sdxl.py @@ -167,9 +167,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -331,9 +331,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.3123679, 0.31725878, 0.32026544, 0.327533, 0.3266391, 0.3303998, 0.33544615, 0.34181812, 0.34102726] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -348,6 +348,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.47400922, 0.48650584, 0.4839625, 0.4724013, 0.4890427, 0.49544555, 0.51707107, 0.54299414, 0.5224372] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sdxl_img2img.py b/tests/pipelines/pag/test_pag_sdxl_img2img.py index c94a6836de7f..0e5c2cc7f93a 100644 --- a/tests/pipelines/pag/test_pag_sdxl_img2img.py +++ b/tests/pipelines/pag/test_pag_sdxl_img2img.py @@ -215,9 +215,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -316,9 +316,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.20301354, 0.21078318, 0.2021082, 0.20277798, 0.20681083, 0.19562206, 0.20121682, 0.21562952, 0.21277016] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -333,6 +333,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.21303111, 0.22188407, 0.2124992, 0.21365267, 0.18823743, 0.17569828, 0.21113116, 0.19419771, 0.18919235] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pag/test_pag_sdxl_inpaint.py b/tests/pipelines/pag/test_pag_sdxl_inpaint.py index cca5292288b0..854c65cbc761 100644 --- a/tests/pipelines/pag/test_pag_sdxl_inpaint.py +++ b/tests/pipelines/pag/test_pag_sdxl_inpaint.py @@ -220,9 +220,9 @@ def test_pag_disable_enable(self): inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] - assert ( - "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters - ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 @@ -322,9 +322,9 @@ def test_pag_cfg(self): expected_slice = np.array( [0.41385046, 0.39608297, 0.4360491, 0.26872507, 0.32187328, 0.4242474, 0.2603805, 0.34167895, 0.46561807] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) def test_pag_uncond(self): pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) @@ -339,6 +339,6 @@ def test_pag_uncond(self): expected_slice = np.array( [0.41597816, 0.39302617, 0.44287828, 0.2687074, 0.28315824, 0.40582314, 0.20877528, 0.2380802, 0.39447647] ) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - ), f"output is different from expected, {image_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/tests/pipelines/pixart_sigma/test_pixart.py b/tests/pipelines/pixart_sigma/test_pixart.py index b220afcfc25a..7084fc9bcec8 100644 --- a/tests/pipelines/pixart_sigma/test_pixart.py +++ b/tests/pipelines/pixart_sigma/test_pixart.py @@ -260,9 +260,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. 
pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -276,15 +276,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) @slow diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py index 1765f3a02242..d433a461bd9d 100644 --- a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py +++ b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py @@ -198,12 +198,12 @@ def test_stable_cascade(self): assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) @require_torch_accelerator def test_offloads(self): diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 6e17b86639ea..3b5c7a24b4ca 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -293,15 +293,15 @@ def test_stable_diffusion_ays(self): inputs["sigmas"] = sigma_schedule output_sigmas = sd_pipe(**inputs).images - assert ( - np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3 - ), "ays timesteps and ays sigmas should have the same outputs" - assert ( - np.abs(output.flatten() - 
output_ts.flatten()).max() > 1e-3 - ), "use ays timesteps should have different outputs" - assert ( - np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3 - ), "use ays sigmas should have different outputs" + assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, ( + "ays timesteps and ays sigmas should have the same outputs" + ) + assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, ( + "use ays timesteps should have different outputs" + ) + assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, ( + "use ays sigmas should have different outputs" + ) def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() @@ -656,9 +656,9 @@ def test_freeu_enabled(self): sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images - assert not np.allclose( - output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1] - ), "Enabling of FreeU should lead to different results." + assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), ( + "Enabling of FreeU should lead to different results." + ) def test_freeu_disabled(self): components = self.get_dummy_components() @@ -681,9 +681,9 @@ def test_freeu_disabled(self): prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0) ).images - assert np.allclose( - output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1] - ), "Disabling of FreeU should lead to results similar to the default pipeline results." + assert np.allclose(output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1]), ( + "Disabling of FreeU should lead to results similar to the default pipeline results." + ) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -706,15 +706,15 @@ def test_fused_qkv_projections(self): image = sd_pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) def test_pipeline_interrupt(self): components = self.get_dummy_components() diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py index 38ef6143f4c0..8e2fa77fc083 100644 --- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py +++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py @@ -171,9 +171,9 @@ def test_fused_qkv_projections(self): # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() - assert check_qkv_fusion_processors_exist( - pipe.transformer - ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." @@ -187,15 +187,15 @@ def test_fused_qkv_projections(self): image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] - assert np.allclose( - original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 - ), "Fusion of QKV projections shouldn't affect the outputs." - assert np.allclose( - image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 - ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." - assert np.allclose( - original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 - ), "Original outputs should match when fused QKV projections are disabled." + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+        )
 
     def test_skip_guidance_layers(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
index c68cdf67036a..a41e7dc7f342 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
@@ -242,15 +242,15 @@ def test_stable_diffusion_ays(self):
         inputs["sigmas"] = sigma_schedule
         output_sigmas = sd_pipe(**inputs).images
 
-        assert (
-            np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3
-        ), "ays timesteps and ays sigmas should have the same outputs"
-        assert (
-            np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3
-        ), "use ays timesteps should have different outputs"
-        assert (
-            np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3
-        ), "use ays sigmas should have different outputs"
+        assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, (
+            "ays timesteps and ays sigmas should have the same outputs"
+        )
+        assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, (
+            "use ays timesteps should have different outputs"
+        )
+        assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, (
+            "use ays sigmas should have different outputs"
+        )
 
     def test_ip_adapter(self):
         expected_pipe_slice = None
@@ -742,9 +742,9 @@ def new_step(self, *args, **kwargs):
             inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
             latents = pipe_1(**inputs_1).images[0]
 
-            assert (
-                expected_steps_1 == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            assert expected_steps_1 == done_steps, (
+                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            )
 
             with self.assertRaises(ValueError) as cm:
                 inputs_2 = {
@@ -771,9 +771,9 @@ def new_step(self, *args, **kwargs):
             pipe_3(**inputs_3).images[0]
 
             assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-            assert (
-                expected_steps == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            assert expected_steps == done_steps, (
+                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            )
 
         for steps in [7, 11, 20]:
             for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
index 66ae581a0529..729c6981d2b5 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
@@ -585,9 +585,9 @@ def new_step(self, *args, **kwargs):
             inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}}
             latents = pipe_1(**inputs_1).images[0]
 
-            assert (
-                expected_steps_1 == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            assert expected_steps_1 == done_steps, (
+                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            )
 
             inputs_2 = {
                 **inputs,
@@ -601,9 +601,9 @@ def new_step(self, *args, **kwargs):
             pipe_3(**inputs_3).images[0]
 
             assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :]
-            assert (
-                expected_steps == done_steps
-            ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            assert expected_steps == done_steps, (
+                f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}"
+            )
 
         for steps in [7, 11, 20]:
             for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]):
diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py
index ae5a12e04ba8..00c7636ed9fd 100644
--- a/tests/pipelines/test_pipelines.py
+++ b/tests/pipelines/test_pipelines.py
@@ -167,9 +167,9 @@ def test_one_request_upon_cached(self):
         download_requests = [r.method for r in m.request_history]
         assert download_requests.count("HEAD") == 15, "15 calls to files"
         assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json"
-        assert (
-            len(download_requests) == 32
-        ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"
+        assert len(download_requests) == 32, (
+            "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"
+        )
 
         with requests_mock.mock(real_http=True) as m:
             DiffusionPipeline.download(
@@ -179,9 +179,9 @@ def test_one_request_upon_cached(self):
         cache_requests = [r.method for r in m.request_history]
         assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
         assert cache_requests.count("GET") == 1, "model info is only GET"
-        assert (
-            len(cache_requests) == 2
-        ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"
+        assert len(cache_requests) == 2, (
+            "We should call only `model_info` to check for _commit hash and `send_telemetry`"
+        )
 
     def test_less_downloads_passed_object(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
@@ -217,9 +217,9 @@ def test_less_downloads_passed_object_calls(self):
         assert download_requests.count("HEAD") == 13, "13 calls to files"
         # 17 - 2 because no call to config or model file for `safety_checker`
         assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json"
-        assert (
-            len(download_requests) == 28
-        ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"
+        assert len(download_requests) == 28, (
+            "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"
+        )
 
         with requests_mock.mock(real_http=True) as m:
             DiffusionPipeline.download(
@@ -229,9 +229,9 @@ def test_less_downloads_passed_object_calls(self):
         cache_requests = [r.method for r in m.request_history]
         assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
         assert cache_requests.count("GET") == 1, "model info is only GET"
-        assert (
-            len(cache_requests) == 2
-        ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"
+        assert len(cache_requests) == 2, (
+            "We should call only `model_info` to check for _commit hash and `send_telemetry`"
+        )
 
     def test_download_only_pytorch(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index b69669464d90..be5245796b35 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -191,12 +191,12 @@ def test_freeu(self):
         inputs["output_type"] = "np"
         output_no_freeu = pipe(**inputs)[0]
 
-        assert not np.allclose(
-            output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]
-        ), "Enabling of FreeU should lead to different results."
-        assert np.allclose(
-            output, output_no_freeu, atol=1e-2
-        ), f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
+        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
+            "Enabling of FreeU should lead to different results."
+        )
+        assert np.allclose(output, output_no_freeu, atol=1e-2), (
+            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
+        )
 
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -217,12 +217,12 @@ def test_fused_qkv_projections(self):
                 and hasattr(component, "original_attn_processors")
                 and component.original_attn_processors is not None
             ):
-                assert check_qkv_fusion_processors_exist(
-                    component
-                ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-                assert check_qkv_fusion_matches_attn_procs_length(
-                    component, component.original_attn_processors
-                ), "Something wrong with the attention processors concerning the fused QKV projections."
+                assert check_qkv_fusion_processors_exist(component), (
+                    "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
+                )
+                assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), (
+                    "Something wrong with the attention processors concerning the fused QKV projections."
+                )
 
         inputs = self.get_dummy_inputs(device)
         inputs["return_dict"] = False
@@ -235,15 +235,15 @@ def test_fused_qkv_projections(self):
         image_disabled = pipe(**inputs)[0]
         image_slice_disabled = image_disabled[0, -3:, -3:, -1]
 
-        assert np.allclose(
-            original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
-        ), "Fusion of QKV projections shouldn't affect the outputs."
-        assert np.allclose(
-            image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
-        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        assert np.allclose(
-            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
-        ), "Original outputs should match when fused QKV projections are disabled."
+        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
+            "Fusion of QKV projections shouldn't affect the outputs."
+        )
+        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
+            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        )
+        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
+            "Original outputs should match when fused QKV projections are disabled."
+        )
 
 
 class IPAdapterTesterMixin:
@@ -909,9 +909,9 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
 
         for component in pipe_original.components.values():
             if hasattr(component, "attn_processors"):
-                assert all(
-                    type(proc) == AttnProcessor for proc in component.attn_processors.values()
-                ), "`from_pipe` changed the attention processor in original pipeline."
+                assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), (
+                    "`from_pipe` changed the attention processor in original pipeline."
+                )
 
     @require_accelerator
     @require_accelerate_version_greater("0.14.0")
@@ -2569,12 +2569,12 @@ def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2)
         image_slice_pab_disabled = output.flatten()
         image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:]))
 
-        assert np.allclose(
-            original_image_slice, image_slice_pab_enabled, atol=expected_atol
-        ), "PAB outputs should not differ much in specified timestep range."
-        assert np.allclose(
-            original_image_slice, image_slice_pab_disabled, atol=1e-4
-        ), "Outputs from normal inference and after disabling cache should not differ."
+        assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), (
+            "PAB outputs should not differ much in specified timestep range."
+        )
+        assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), (
+            "Outputs from normal inference and after disabling cache should not differ."
+        )
 
 
 class FasterCacheTesterMixin:
@@ -2639,12 +2639,12 @@ def run_forward(pipe):
         output = run_forward(pipe).flatten()
         image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:]))
 
-        assert np.allclose(
-            original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol
-        ), "FasterCache outputs should not differ much in specified timestep range."
-        assert np.allclose(
-            original_image_slice, image_slice_faster_cache_disabled, atol=1e-4
-        ), "Outputs from normal inference and after disabling cache should not differ."
+        assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), (
+            "FasterCache outputs should not differ much in specified timestep range."
+        )
+        assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), (
+            "Outputs from normal inference and after disabling cache should not differ."
+        )
 
     def test_faster_cache_state(self):
         from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
diff --git a/tests/pipelines/wuerstchen/test_wuerstchen_combined.py b/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
index 084d62a8c613..fa544c91f2d9 100644
--- a/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
+++ b/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
@@ -191,12 +191,12 @@ def test_wuerstchen(self):
 
         expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898])
 
-        assert (
-            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
-        assert (
-            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
+            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
+        )
+        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
+            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
+        )
 
     @require_torch_accelerator
     def test_offloads(self):
diff --git a/tests/schedulers/test_scheduler_dpm_multi.py b/tests/schedulers/test_scheduler_dpm_multi.py
index 55b3202ad0be..28c354709dc9 100644
--- a/tests/schedulers/test_scheduler_dpm_multi.py
+++ b/tests/schedulers/test_scheduler_dpm_multi.py
@@ -357,9 +357,9 @@ def test_custom_timesteps(self):
                         prediction_type=prediction_type,
                         final_sigmas_type=final_sigmas_type,
                     )
-                    assert (
-                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
-                    ), f"Scheduler outputs are not identical for algorithm_type: {algorithm_type}, prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
+                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
+                        f"Scheduler outputs are not identical for algorithm_type: {algorithm_type}, prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
+                    )
 
     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_dpm_single.py b/tests/schedulers/test_scheduler_dpm_single.py
index 7cbaa5cc5e8d..0756a5ed71ff 100644
--- a/tests/schedulers/test_scheduler_dpm_single.py
+++ b/tests/schedulers/test_scheduler_dpm_single.py
@@ -345,9 +345,9 @@ def test_custom_timesteps(self):
                         lower_order_final=lower_order_final,
                         final_sigmas_type=final_sigmas_type,
                     )
-                    assert (
-                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
-                    ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, lower_order_final: {lower_order_final} and final_sigmas_type: {final_sigmas_type}"
+                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
+                        f"Scheduler outputs are not identical for prediction_type: {prediction_type}, lower_order_final: {lower_order_final} and final_sigmas_type: {final_sigmas_type}"
+                    )
 
     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py b/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
index e97d64ec5f1d..8525ce61c40d 100644
--- a/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
+++ b/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py
@@ -188,9 +188,9 @@ def test_solver_order_and_type(self):
                         prediction_type=prediction_type,
                         algorithm_type=algorithm_type,
                     )
-                    assert (
-                        not torch.isnan(sample).any()
-                    ), f"Samples have nan numbers, {order}, {solver_type}, {prediction_type}, {algorithm_type}"
+                    assert not torch.isnan(sample).any(), (
+                        f"Samples have nan numbers, {order}, {solver_type}, {prediction_type}, {algorithm_type}"
+                    )
 
     def test_lower_order_final(self):
         self.check_over_configs(lower_order_final=True)
diff --git a/tests/schedulers/test_scheduler_euler.py b/tests/schedulers/test_scheduler_euler.py
index 4c7e02442cd0..01e173a631cd 100644
--- a/tests/schedulers/test_scheduler_euler.py
+++ b/tests/schedulers/test_scheduler_euler.py
@@ -245,9 +245,9 @@ def test_custom_timesteps(self):
                        interpolation_type=interpolation_type,
                        final_sigmas_type=final_sigmas_type,
                    )
-                    assert (
-                        torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
-                    ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}"
+                    assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
+                        f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}"
+                    )
 
     def test_custom_sigmas(self):
         for prediction_type in ["epsilon", "sample", "v_prediction"]:
@@ -260,9 +260,9 @@ def test_custom_sigmas(self):
                     prediction_type=prediction_type,
                     final_sigmas_type=final_sigmas_type,
                 )
-                assert (
-                    torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
-                ), f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
+                assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
+                    f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
+                )
 
     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/schedulers/test_scheduler_heun.py b/tests/schedulers/test_scheduler_heun.py
index 9e060c6d476f..90012f5525ab 100644
--- a/tests/schedulers/test_scheduler_heun.py
+++ b/tests/schedulers/test_scheduler_heun.py
@@ -216,9 +216,9 @@ def test_custom_timesteps(self):
                     prediction_type=prediction_type,
                     timestep_spacing=timestep_spacing,
                 )
-                assert (
-                    torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
-                ), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}"
+                assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, (
+                    f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}"
+                )
 
     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)
diff --git a/tests/single_file/single_file_testing_utils.py b/tests/single_file/single_file_testing_utils.py
index 4e7bc0af6842..4e1713c9ceb1 100644
--- a/tests/single_file/single_file_testing_utils.py
+++ b/tests/single_file/single_file_testing_utils.py
@@ -72,9 +72,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 continue
 
             assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
-            assert isinstance(
-                component, pipe.components[component_name].__class__
-            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
+            assert isinstance(component, pipe.components[component_name].__class__), (
+                f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
+            )
 
             for param_name, param_value in component.config.items():
                 if param_name in PARAMS_TO_IGNORE:
@@ -85,9 +85,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                     pipe.components[component_name].config[param_name] = param_value
 
-                assert (
-                    pipe.components[component_name].config[param_name] == param_value
-                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
+                assert pipe.components[component_name].config[param_name] == param_value, (
+                    f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
+                )
 
     def test_single_file_components(self, pipe=None, single_file_pipe=None):
         single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
@@ -253,9 +253,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 continue
 
             assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
-            assert isinstance(
-                component, pipe.components[component_name].__class__
-            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
+            assert isinstance(component, pipe.components[component_name].__class__), (
+                f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"
+            )
 
             for param_name, param_value in component.config.items():
                 if param_name in PARAMS_TO_IGNORE:
@@ -266,9 +266,9 @@ def _compare_component_configs(self, pipe, single_file_pipe):
                 if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                     pipe.components[component_name].config[param_name] = param_value
 
-                assert (
-                    pipe.components[component_name].config[param_name] == param_value
-                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
+                assert pipe.components[component_name].config[param_name] == param_value, (
+                    f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
+                )
 
     def test_single_file_components(self, pipe=None, single_file_pipe=None):
         single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
diff --git a/tests/single_file/test_lumina2_transformer.py b/tests/single_file/test_lumina2_transformer.py
index 78e68c4c2df0..d3ffd4fc3a55 100644
--- a/tests/single_file/test_lumina2_transformer.py
+++ b/tests/single_file/test_lumina2_transformer.py
@@ -60,9 +60,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths:
diff --git a/tests/single_file/test_model_autoencoder_dc_single_file.py b/tests/single_file/test_model_autoencoder_dc_single_file.py
index b1faeb78776b..31b2eb6e36b0 100644
--- a/tests/single_file/test_model_autoencoder_dc_single_file.py
+++ b/tests/single_file/test_model_autoencoder_dc_single_file.py
@@ -87,9 +87,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_in_type_variant_components(self):
         # `in` variant checkpoints require passing in a `config` parameter
@@ -106,9 +106,9 @@ def test_single_file_in_type_variant_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_mix_type_variant_components(self):
         repo_id = "mit-han-lab/dc-ae-f128c512-mix-1.0-diffusers"
@@ -121,6 +121,6 @@ def test_single_file_mix_type_variant_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
diff --git a/tests/single_file/test_model_controlnet_single_file.py b/tests/single_file/test_model_controlnet_single_file.py
index bfcb802380a6..3580d73531a3 100644
--- a/tests/single_file/test_model_controlnet_single_file.py
+++ b/tests/single_file/test_model_controlnet_single_file.py
@@ -58,9 +58,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_single_file_arguments(self):
         model_default = self.model_class.from_single_file(self.ckpt_path)
diff --git a/tests/single_file/test_model_flux_transformer_single_file.py b/tests/single_file/test_model_flux_transformer_single_file.py
index 0ec97db26a9e..bf11faaa9c0e 100644
--- a/tests/single_file/test_model_flux_transformer_single_file.py
+++ b/tests/single_file/test_model_flux_transformer_single_file.py
@@ -58,9 +58,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths:
diff --git a/tests/single_file/test_model_motion_adapter_single_file.py b/tests/single_file/test_model_motion_adapter_single_file.py
index b195f25d094b..a747f16dc1db 100644
--- a/tests/single_file/test_model_motion_adapter_single_file.py
+++ b/tests/single_file/test_model_motion_adapter_single_file.py
@@ -40,9 +40,9 @@ def test_single_file_components_version_v1_5(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_components_version_v1_5_2(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
@@ -55,9 +55,9 @@ def test_single_file_components_version_v1_5_2(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_components_version_v1_5_3(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
@@ -70,9 +70,9 @@ def test_single_file_components_version_v1_5_3(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_components_version_sdxl_beta(self):
         ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
@@ -85,6 +85,6 @@ def test_single_file_components_version_sdxl_beta(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
diff --git a/tests/single_file/test_model_sd_cascade_unet_single_file.py b/tests/single_file/test_model_sd_cascade_unet_single_file.py
index 08b04e3cd7e8..92b371c3fb41 100644
--- a/tests/single_file/test_model_sd_cascade_unet_single_file.py
+++ b/tests/single_file/test_model_sd_cascade_unet_single_file.py
@@ -60,9 +60,9 @@ def test_single_file_components_stage_b(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_single_file_components_stage_b_lite(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -77,9 +77,9 @@ def test_single_file_components_stage_b_lite(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_single_file_components_stage_c(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -94,9 +94,9 @@ def test_single_file_components_stage_c(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_single_file_components_stage_c_lite(self):
         model_single_file = StableCascadeUNet.from_single_file(
@@ -111,6 +111,6 @@ def test_single_file_components_stage_c_lite(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
diff --git a/tests/single_file/test_model_vae_single_file.py b/tests/single_file/test_model_vae_single_file.py
index 9db4cddb3c9d..bba1726ae380 100644
--- a/tests/single_file/test_model_vae_single_file.py
+++ b/tests/single_file/test_model_vae_single_file.py
@@ -91,9 +91,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between pretrained loading and single file loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between pretrained loading and single file loading"
+            )
 
     def test_single_file_arguments(self):
         model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
diff --git a/tests/single_file/test_model_wan_autoencoder_single_file.py b/tests/single_file/test_model_wan_autoencoder_single_file.py
index f5720ddd3964..7f0e1c1a4b0b 100644
--- a/tests/single_file/test_model_wan_autoencoder_single_file.py
+++ b/tests/single_file/test_model_wan_autoencoder_single_file.py
@@ -56,6 +56,6 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
diff --git a/tests/single_file/test_model_wan_transformer3d_single_file.py b/tests/single_file/test_model_wan_transformer3d_single_file.py
index 9b938aa1754c..36f0919cacb5 100644
--- a/tests/single_file/test_model_wan_transformer3d_single_file.py
+++ b/tests/single_file/test_model_wan_transformer3d_single_file.py
@@ -57,9 +57,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
 
 @require_big_gpu_with_torch_cuda
@@ -88,6 +88,6 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
diff --git a/tests/single_file/test_sana_transformer.py b/tests/single_file/test_sana_transformer.py
index 7695e1577711..802ca37abfc3 100644
--- a/tests/single_file/test_sana_transformer.py
+++ b/tests/single_file/test_sana_transformer.py
@@ -47,9 +47,9 @@ def test_single_file_components(self):
         for param_name, param_value in model_single_file.config.items():
             if param_name in PARAMS_TO_IGNORE:
                 continue
-            assert (
-                model.config[param_name] == param_value
-            ), f"{param_name} differs between single file loading and pretrained loading"
+            assert model.config[param_name] == param_value, (
+                f"{param_name} differs between single file loading and pretrained loading"
+            )
 
     def test_checkpoint_loading(self):
         for ckpt_path in self.alternate_keys_ckpt_paths: