
Commit 6513201

add more cases
1 parent e00bcca commit 6513201

File tree: 8 files changed, +31 -39 lines changed

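Every diff below applies the same change to a pipeline's test_to_device: the @unittest.skipIf(torch_device != "cuda", ...) guard is removed and the hard-coded "cuda" strings are replaced with torch_device, so the test also runs on non-CUDA accelerators and CPU-only machines. A minimal sketch of the resulting device-agnostic check, assuming torch_device comes from diffusers' testing utilities as in these test files; the check_to_device helper and its get_dummy_inputs argument are illustrative, not part of the commit:

import numpy as np

from diffusers.utils.testing_utils import torch_device  # typically "cuda" when a GPU is available, otherwise "cpu"


def to_np(x):
    # Convert a torch tensor (or array-like) to NumPy, mirroring the to_np helper these tests use.
    return x.detach().cpu().numpy() if hasattr(x, "detach") else np.asarray(x)


def check_to_device(pipe, get_dummy_inputs):
    # Illustrative helper (not part of the diff) showing the device-agnostic pattern shared by the edited tests.
    output_cpu = pipe(**get_dummy_inputs("cpu"))[0]
    assert np.isnan(to_np(output_cpu)).sum() == 0

    # Move to whatever device the test session resolved instead of hard-coding "cuda".
    pipe.to(torch_device)
    model_devices = [c.device.type for c in pipe.components.values() if hasattr(c, "device")]
    assert all(device == torch_device for device in model_devices)

    output_device = pipe(**get_dummy_inputs(torch_device))[0]
    assert np.isnan(to_np(output_device)).sum() == 0

Comparing component.device.type against torch_device works because both are plain strings such as "cuda" or "cpu".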

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 4 additions & 5 deletions

@@ -212,7 +212,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -228,14 +227,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

Lines changed: 3 additions & 4 deletions

@@ -345,7 +345,6 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -361,13 +360,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

     def test_to_dtype(self):

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 4 additions & 5 deletions

@@ -258,7 +258,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -274,14 +273,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 4 additions & 5 deletions

@@ -306,7 +306,6 @@ def test_multi_vae(self):

         assert out_vae_np.shape == out_np.shape

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -322,14 +321,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)


 @slow

tests/pipelines/pag/test_pag_animatediff.py

Lines changed: 4 additions & 5 deletions

@@ -218,7 +218,6 @@ def test_dict_tuple_outputs_equivalent(self):
         expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538])
         return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -234,14 +233,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/pia/test_pia.py

Lines changed: 4 additions & 5 deletions

@@ -278,7 +278,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -294,14 +293,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py

Lines changed: 4 additions & 5 deletions

@@ -365,7 +365,6 @@ def test_save_load_local(self, expected_max_difference=9e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -380,14 +379,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda")).frames[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device)).frames[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()

tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py

Lines changed: 4 additions & 5 deletions

@@ -329,7 +329,6 @@ def test_save_load_optional_components(self):
     def test_sequential_cpu_offload_forward_pass(self):
         pass

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -342,12 +341,12 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     @unittest.skip(
         reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
