Commit fac7616

[Tests] Speed up some fast pipeline tests (#7477)
* speed up test_vae_slicing in animatediff
* speed up test_karras_schedulers_shape for attend and excite.
* style.
* get the static slices out.
* specify torch print options.
* modify
* test run with controlnet
* specify kwarg
* fix: things
* not None
* flatten
* controlnet img2img
* complete controlnet sd
* finish more
* finish more
* finish more
* finish more
* finish the final batch
* add cpu check for expected_pipe_slice.
* finish the rest
* remove print
* style
* fix ssd1b controlnet test
* checking ssd1b
* disable the test.
* make the test_ip_adapter_single controlnet test more robust
* fix: simple inpaint
* multi
* disable panorama
* enable again
* panorama is shaky so leave it for now
* remove print
* raise tolerance.
1 parent 34c90db commit fac7616

19 files changed (+263 −15 lines)

src/diffusers/utils/testing_utils.py

Lines changed: 13 additions & 2 deletions
@@ -105,10 +105,21 @@ def numpy_cosine_similarity_distance(a, b):
     return distance
 
 
-def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"):
+def print_tensor_test(
+    tensor,
+    limit_to_slices=None,
+    max_torch_print=None,
+    filename="test_corrections.txt",
+    expected_tensor_name="expected_slice",
+):
+    if max_torch_print:
+        torch.set_printoptions(threshold=10_000)
+
     test_name = os.environ.get("PYTEST_CURRENT_TEST")
     if not torch.is_tensor(tensor):
         tensor = torch.from_numpy(tensor)
+    if limit_to_slices:
+        tensor = tensor[0, -3:, -3:, -1]
 
     tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
     # format is usually:
@@ -117,7 +128,7 @@ def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_n
     test_file, test_class, test_fn = test_name.split("::")
     test_fn = test_fn.split()[0]
     with open(filename, "a") as f:
-        print(";".join([test_file, test_class, test_fn, output_str]), file=f)
+        print("::".join([test_file, test_class, test_fn, output_str]), file=f)
 
 
 def get_tests_dir(append_path=None):
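
The two new keyword arguments make this helper convenient for regenerating the hardcoded slices used throughout the rest of the commit. A minimal usage sketch (assumed, not part of the commit; it must run under pytest so PYTEST_CURRENT_TEST is set, and it appends to test_corrections.txt):

import torch
from diffusers.utils.testing_utils import print_tensor_test

def test_regenerate_expected_slice():
    image = torch.rand(1, 64, 64, 3)  # stand-in for `pipe(**inputs).images`
    # limit_to_slices reduces the tensor to tensor[0, -3:, -3:, -1] before
    # printing; max_torch_print raises torch's print threshold (to 10,000
    # elements) so long tensors are written in full instead of elided with "...".
    print_tensor_test(image, limit_to_slices=True, max_torch_print=True)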

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 39 additions & 0 deletions
@@ -131,6 +131,42 @@ def test_motion_unet_loading(self):
     def test_attention_slicing_forward_pass(self):
         pass
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array(
+                [
+                    0.5541,
+                    0.5802,
+                    0.5074,
+                    0.4583,
+                    0.4729,
+                    0.5374,
+                    0.4051,
+                    0.4495,
+                    0.4480,
+                    0.5292,
+                    0.6322,
+                    0.6265,
+                    0.5455,
+                    0.4771,
+                    0.5795,
+                    0.5845,
+                    0.4172,
+                    0.6066,
+                    0.6535,
+                    0.4113,
+                    0.6833,
+                    0.5736,
+                    0.3589,
+                    0.5730,
+                    0.4205,
+                    0.3786,
+                    0.5323,
+                ]
+            )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_inference_batch_single_identical(
         self,
         batch_size=2,
@@ -299,6 +335,9 @@ def test_xformers_attention_forwardGenerator_pass(self):
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
 
+    def test_vae_slicing(self):
+        return super().test_vae_slicing(image_count=2)
+
 
 @slow
 @require_torch_gpu
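
Every test_ip_adapter_single override in this commit follows the same recipe: hardcode the flattened output slice for the CPU case and hand it to the shared IP-adapter test in the common pipeline test mixins, which can then presumably skip the baseline forward pass. A self-contained sketch of that pattern under the stated assumption (names are illustrative, not the mixin's actual code):

import numpy as np

def check_ip_adapter_noop(run_pipeline, expected_pipe_slice=None, atol=1e-4):
    # `run_pipeline` stands in for a full pipeline forward pass returning an
    # image batch of shape (1, H, W, 3).
    if expected_pipe_slice is None:
        # No precomputed values: pay for an extra forward pass to get them.
        baseline = run_pipeline()[0, -3:, -3:, -1].flatten()
    else:
        # Precomputed on CPU: the baseline run is skipped — that is the speedup.
        baseline = expected_pipe_slice
    # The run under test (e.g. with the IP adapter attached at scale 0.0)
    # should reproduce the baseline slice.
    candidate = run_pipeline()[0, -3:, -3:, -1].flatten()
    assert np.abs(candidate - baseline).max() < atol

The test_vae_slicing override in the second hunk trims cost the same way: decoding image_count=2 instead of the mixin's presumably larger default exercises the same sliced-VAE code path with less work.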

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 28 additions & 0 deletions
@@ -135,6 +135,34 @@ def test_motion_unet_loading(self):
     def test_attention_slicing_forward_pass(self):
         pass
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array(
+                [
+                    0.4947,
+                    0.4780,
+                    0.4340,
+                    0.4666,
+                    0.4028,
+                    0.4645,
+                    0.4915,
+                    0.4101,
+                    0.4308,
+                    0.4581,
+                    0.3582,
+                    0.4953,
+                    0.4466,
+                    0.5348,
+                    0.5863,
+                    0.5299,
+                    0.5213,
+                    0.5017,
+                ]
+            )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_inference_batch_single_identical(
         self,
         batch_size=2,
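
Note that the animation pipelines (test_animatediff.py above, this file, and test_pia.py below) pin slices of 27 or 18 values rather than the 9-value 3x3 corner used by the image pipelines, presumably because the checked slice spans several output frames.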

tests/pipelines/controlnet/test_controlnet.py

Lines changed: 18 additions & 0 deletions
@@ -221,6 +221,12 @@ def get_dummy_inputs(self, device, seed=0):
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.5234, 0.3333, 0.1745, 0.7605, 0.6224, 0.4637, 0.6989, 0.7526, 0.4665])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
@@ -455,6 +461,12 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.2422, 0.3425, 0.4048, 0.5351, 0.3503, 0.2419, 0.4645, 0.4570, 0.3804])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_save_pretrained_raise_not_implemented_exception(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -668,6 +680,12 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.5264, 0.3203, 0.1602, 0.8235, 0.6332, 0.4593, 0.7226, 0.7777, 0.4780])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_save_pretrained_raise_not_implemented_exception(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)

tests/pipelines/controlnet/test_controlnet_img2img.py

Lines changed: 12 additions & 0 deletions
@@ -174,6 +174,12 @@ def get_dummy_inputs(self, device, seed=0):
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.7096, 0.5149, 0.3571, 0.5897, 0.4715, 0.4052, 0.6098, 0.6886, 0.4213])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
@@ -366,6 +372,12 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.5293, 0.7339, 0.6642, 0.3950, 0.5212, 0.5175, 0.7002, 0.5907, 0.5182])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_save_pretrained_raise_not_implemented_exception(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)

tests/pipelines/controlnet/test_controlnet_sdxl.py

Lines changed: 15 additions & 0 deletions
@@ -191,6 +191,15 @@ def get_dummy_inputs(self, device, seed=0):
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self, from_ssd1b=False, expected_pipe_slice=None):
+        if not from_ssd1b:
+            expected_pipe_slice = None
+            if torch_device == "cpu":
+                expected_pipe_slice = np.array(
+                    [0.7331, 0.5907, 0.5667, 0.6029, 0.5679, 0.5968, 0.4033, 0.4761, 0.5090]
+                )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
@@ -1042,6 +1051,12 @@ def test_controlnet_sdxl_guess(self):
         # make sure that it's equal
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.6832, 0.5703, 0.5460, 0.6300, 0.5856, 0.6034, 0.4494, 0.4613, 0.5036])
+        return super().test_ip_adapter_single(from_ssd1b=True, expected_pipe_slice=expected_pipe_slice)
+
     def test_controlnet_sdxl_lcm(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator

tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py

Lines changed: 6 additions & 0 deletions
@@ -170,6 +170,12 @@ def get_dummy_inputs(self, device, seed=0):
 
         return inputs
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.6265, 0.5441, 0.5384, 0.5446, 0.5810, 0.5908, 0.5414, 0.5428, 0.5353])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_stable_diffusion_xl_controlnet_img2img(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/latent_consistency_models/test_latent_consistency_models.py

Lines changed: 6 additions & 0 deletions
@@ -108,6 +108,12 @@ def get_dummy_inputs(self, device, seed=0):
         }
         return inputs
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_lcm_onestep(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator

tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py

Lines changed: 6 additions & 0 deletions
@@ -119,6 +119,12 @@ def get_dummy_inputs(self, device, seed=0):
         }
         return inputs
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.4003, 0.3718, 0.2863, 0.5500, 0.5587, 0.3772, 0.4617, 0.4961, 0.4417])
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_lcm_onestep(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator

tests/pipelines/pia/test_pia.py

Lines changed: 37 additions & 0 deletions
@@ -138,6 +138,43 @@ def test_motion_unet_loading(self):
 
         assert isinstance(pipe.unet, UNetMotionModel)
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array(
+                [
+                    0.5609,
+                    0.5756,
+                    0.4830,
+                    0.4420,
+                    0.4547,
+                    0.5129,
+                    0.3779,
+                    0.4042,
+                    0.3772,
+                    0.4450,
+                    0.5710,
+                    0.5536,
+                    0.4835,
+                    0.4308,
+                    0.5578,
+                    0.5578,
+                    0.4395,
+                    0.5440,
+                    0.6051,
+                    0.4651,
+                    0.6258,
+                    0.5662,
+                    0.3988,
+                    0.5108,
+                    0.4153,
+                    0.3993,
+                    0.4803,
+                ]
+            )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     @unittest.skip("Attention slicing is not enabled in this pipeline")
     def test_attention_slicing_forward_pass(self):
         pass
