diff --git a/tests/models/transformers/test_models_prior.py b/tests/models/transformers/test_models_prior.py
index d2ed10dfa1f6..471c1084c00c 100644
--- a/tests/models/transformers/test_models_prior.py
+++ b/tests/models/transformers/test_models_prior.py
@@ -132,7 +132,6 @@ def test_output_pretrained(self):
             output = model(**input)[0]
 
         output_slice = output[0, :5].flatten().cpu()
-        print(output_slice)
 
         # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
         # the expected output slices are not the same for CPU and GPU.
@@ -182,7 +181,6 @@ def test_kandinsky_prior(self, seed, expected_slice):
         assert list(sample.shape) == [1, 768]
 
         output_slice = sample[0, :8].flatten().cpu()
-        print(output_slice)
         expected_output_slice = torch.tensor(expected_slice)
 
         assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py
index 8ec5b6e9a5e4..57f6e4ee440b 100644
--- a/tests/models/unets/test_models_unet_2d_condition.py
+++ b/tests/models/unets/test_models_unet_2d_condition.py
@@ -175,8 +175,7 @@ def create_ip_adapter_plus_state_dict(model):
     )
 
     ip_image_projection_state_dict = OrderedDict()
-    keys = [k for k in image_projection.state_dict() if "layers." in k]
-    print(keys)
+
     for k, v in image_projection.state_dict().items():
         if "2.to" in k:
             k = k.replace("2.to", "0.to")
diff --git a/tests/pipelines/controlnet/test_flax_controlnet.py b/tests/pipelines/controlnet/test_flax_controlnet.py
index bf5564e810ef..c71116dc7927 100644
--- a/tests/pipelines/controlnet/test_flax_controlnet.py
+++ b/tests/pipelines/controlnet/test_flax_controlnet.py
@@ -78,7 +78,7 @@ def test_canny(self):
         expected_slice = jnp.array(
             [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
         )
-        print(f"output_slice: {output_slice}")
+
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
 
     def test_pose(self):
@@ -123,5 +123,5 @@ def test_pose(self):
         expected_slice = jnp.array(
             [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
         )
-        print(f"output_slice: {output_slice}")
+
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py
index 607a47e08e58..a7f861565cc9 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_combined.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py
@@ -308,8 +308,6 @@ def test_kandinsky(self):
 
         image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
 
-        print(image_from_tuple_slice)
-
         assert image.shape == (1, 64, 64, 3)
 
         expected_slice = np.array([0.0320, 0.0860, 0.4013, 0.0518, 0.2484, 0.5847, 0.4411, 0.2321, 0.4593])
diff --git a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
index effea2619749..4aa48a920fad 100644
--- a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
+++ b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py
@@ -146,7 +146,7 @@ def test_ledits_pp_inversion(self):
         )
 
         latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
-        print(latent_slice.flatten())
+
         expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7104, 2.1090, -0.7822])
         assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
 
@@ -167,12 +167,12 @@ def test_ledits_pp_inversion_batch(self):
         )
 
         latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
-        print(latent_slice.flatten())
+
         expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5657, -1.0286, -0.9961, 0.5933, 1.1173])
         assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
 
         latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device)
-        print(latent_slice.flatten())
+
         expected_slice = np.array([-0.0796, 2.0583, 0.5501, 0.5358, 0.0282, -0.2803, -1.0470, 0.7023, -0.0072])
         assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
 
diff --git a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
index fcfd0aa51b9f..da694175a9f1 100644
--- a/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
+++ b/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py
@@ -216,14 +216,14 @@ def test_ledits_pp_inversion_batch(self):
         )
 
         latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
-        print(latent_slice.flatten())
+
         expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5656, -1.0286, -0.9961, 0.5933, 1.1172])
         assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
 
         latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device)
-        print(latent_slice.flatten())
+
         expected_slice = np.array([-0.0796, 2.0583, 0.5500, 0.5358, 0.0282, -0.2803, -1.0470, 0.7024, -0.0072])
-        print(latent_slice.flatten())
+
         assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
 
     def test_ledits_pp_warmup_steps(self):
diff --git a/tests/pipelines/pag/test_pag_sd.py b/tests/pipelines/pag/test_pag_sd.py
index 3979bb170e0b..17e3f7038439 100644
--- a/tests/pipelines/pag/test_pag_sd.py
+++ b/tests/pipelines/pag/test_pag_sd.py
@@ -318,7 +318,7 @@ def test_pag_cfg(self):
         image_slice = image[0, -3:, -3:, -1].flatten()
 
         assert image.shape == (1, 512, 512, 3)
-        print(image_slice.flatten())
+
         expected_slice = np.array(
             [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484]
         )
@@ -339,7 +339,6 @@ def test_pag_uncond(self):
         expected_slice = np.array(
             [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867]
         )
-        print(image_slice.flatten())
         assert (
             np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
         ), f"output is different from expected, {image_slice.flatten()}"
diff --git a/tests/pipelines/pag/test_pag_sd_img2img.py b/tests/pipelines/pag/test_pag_sd_img2img.py
index ec8cde23c31d..f44204f82486 100644
--- a/tests/pipelines/pag/test_pag_sd_img2img.py
+++ b/tests/pipelines/pag/test_pag_sd_img2img.py
@@ -255,7 +255,7 @@ def test_pag_cfg(self):
         image_slice = image[0, -3:, -3:, -1].flatten()
 
         assert image.shape == (1, 512, 512, 3)
-        print(image_slice.flatten())
+
         expected_slice = np.array(
             [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484]
         )
@@ -276,7 +276,7 @@ def test_pag_uncond(self):
         expected_slice = np.array(
             [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867]
         )
-        print(image_slice.flatten())
+
         assert (
             np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
         ), f"output is different from expected, {image_slice.flatten()}"
diff --git a/tests/pipelines/pag/test_pag_sd_inpaint.py b/tests/pipelines/pag/test_pag_sd_inpaint.py
index cd175c600d47..a528b66cc72a 100644
--- a/tests/pipelines/pag/test_pag_sd_inpaint.py
+++ b/tests/pipelines/pag/test_pag_sd_inpaint.py
@@ -292,7 +292,7 @@ def test_pag_cfg(self):
         image_slice = image[0, -3:, -3:, -1].flatten()
 
         assert image.shape == (1, 512, 512, 3)
-        print(image_slice.flatten())
+
         expected_slice = np.array(
             [0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625]
         )
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
index b9b061c060c0..5690caa257b7 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
@@ -206,9 +206,6 @@ def test_stable_diffusion_pix2pix_euler(self):
         image = sd_pipe(**inputs).images
         image_slice = image[0, -3:, -3:, -1]
 
-        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
-        print(",".join([str(x) for x in slice]))
-
         assert image.shape == (1, 32, 32, 3)
 
         expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
index dc855f44b817..9e4fa767085f 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
@@ -62,7 +62,7 @@ def test_stable_diffusion_flax(self):
 
         output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
         expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
-        print(f"output_slice: {output_slice}")
+
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
 
 
@@ -104,5 +104,5 @@ def test_stable_diffusion_dpm_flax(self):
 
         output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
         expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
-        print(f"output_slice: {output_slice}")
+
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
index 8f039980ec24..eeec52dab51d 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
@@ -78,5 +78,5 @@ def test_stable_diffusion_inpaint_pipeline(self):
         expected_slice = jnp.array(
             [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
         )
-        print(f"output_slice: {output_slice}")
+
         assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
index 2091af9c0383..7c7b03786563 100644
--- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
+++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
@@ -642,9 +642,6 @@ def test_adapter_sdxl_lcm(self):
 
         assert image.shape == (1, 64, 64, 3)
         expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
-        debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
-        print(",".join(debug))
-
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
     def test_adapter_sdxl_lcm_custom_timesteps(self):
@@ -667,7 +664,4 @@ def test_adapter_sdxl_lcm_custom_timesteps(self):
 
         assert image.shape == (1, 64, 64, 3)
         expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
-        debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
-        print(",".join(debug))
-
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 764be1890cc5..f5494fbade2e 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -1192,7 +1192,6 @@ def _test_inference_batch_consistent(
         logger.setLevel(level=diffusers.logging.WARNING)
 
         for batch_size, batched_input in zip(batch_sizes, batched_inputs):
-            print(batch_size, batched_input)
             output = pipe(**batched_input)
             assert len(output[0]) == batch_size
 
diff --git a/tests/schedulers/test_scheduler_sasolver.py b/tests/schedulers/test_scheduler_sasolver.py
index d6d7c029b019..baa2736b2fcc 100644
--- a/tests/schedulers/test_scheduler_sasolver.py
+++ b/tests/schedulers/test_scheduler_sasolver.py
@@ -103,8 +103,6 @@ def test_full_loop_no_noise(self):
         elif torch_device in ["cuda"]:
             assert abs(result_sum.item() - 329.1999816894531) < 1e-2
             assert abs(result_mean.item() - 0.4286458194255829) < 1e-3
-        else:
-            print("None")
 
     def test_full_loop_with_v_prediction(self):
         scheduler_class = self.scheduler_classes[0]
@@ -135,8 +133,6 @@ def test_full_loop_with_v_prediction(self):
         elif torch_device in ["cuda"]:
             assert abs(result_sum.item() - 193.4154052734375) < 1e-2
             assert abs(result_mean.item() - 0.2518429756164551) < 1e-3
-        else:
-            print("None")
 
     def test_full_loop_device(self):
         scheduler_class = self.scheduler_classes[0]
@@ -166,8 +162,6 @@ def test_full_loop_device(self):
         elif torch_device in ["cuda"]:
             assert abs(result_sum.item() - 337.394287109375) < 1e-2
             assert abs(result_mean.item() - 0.4393154978752136) < 1e-3
-        else:
-            print("None")
 
     def test_full_loop_device_karras_sigmas(self):
         scheduler_class = self.scheduler_classes[0]
@@ -198,8 +192,6 @@ def test_full_loop_device_karras_sigmas(self):
         elif torch_device in ["cuda"]:
             assert abs(result_sum.item() - 837.25537109375) < 1e-2
             assert abs(result_mean.item() - 1.0901763439178467) < 1e-2
-        else:
-            print("None")
 
     def test_beta_sigmas(self):
         self.check_over_configs(use_beta_sigmas=True)