Commit 8696fbb

allow big lora tests to run on the CI.
1 parent: 13e8fde

File tree: 2 files changed (+20 lines, -3 lines)

tests/lora/test_lora_layers_flux.py

Lines changed: 9 additions & 2 deletions
@@ -31,9 +31,12 @@
     numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
+    require_big_gpu_with_torch_cuda,
     slow,
     torch_device,
+    print_tensor_test
 )
+import pytest
 
 
 if is_peft_available():

@@ -169,8 +172,8 @@ def test_modify_padding_mode(self):
 @nightly
 @require_torch_gpu
 @require_peft_backend
-@unittest.skip("We cannot run inference on this model with the current CI hardware")
-# TODO (DN6, sayakpaul): move these tests to a beefier GPU
+@require_big_gpu_with_torch_cuda
+@pytest.mark.big_gpu_with_torch_cuda
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.

@@ -211,6 +214,7 @@ def test_flux_the_last_ben(self):
             generator=torch.manual_seed(self.seed),
         ).images
         out_slice = out[0, -3:, -3:, -1].flatten()
+        print_tensor_test(out_slice)
         expected_slice = np.array([0.1855, 0.1855, 0.1836, 0.1855, 0.1836, 0.1875, 0.1777, 0.1758, 0.2246])
 
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

@@ -233,6 +237,7 @@ def test_flux_kohya(self):
         ).images
 
         out_slice = out[0, -3:, -3:, -1].flatten()
+        print_tensor_test(out_slice)
         expected_slice = np.array([0.6367, 0.6367, 0.6328, 0.6367, 0.6328, 0.6289, 0.6367, 0.6328, 0.6484])
 
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

@@ -255,6 +260,7 @@ def test_flux_kohya_with_text_encoder(self):
         ).images
 
         out_slice = out[0, -3:, -3:, -1].flatten()
+        print_tensor_test(out_slice)
         expected_slice = np.array([0.4023, 0.4023, 0.4023, 0.3965, 0.3984, 0.3965, 0.3926, 0.3906, 0.4219])
 
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

@@ -277,6 +283,7 @@ def test_flux_xlabs(self):
             generator=torch.manual_seed(self.seed),
         ).images
         out_slice = out[0, -3:, -3:, -1].flatten()
+        print_tensor_test(out_slice)
         expected_slice = np.array([0.3965, 0.4180, 0.4434, 0.4082, 0.4375, 0.4590, 0.4141, 0.4375, 0.4980])
 
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
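Note on the gate itself: require_big_gpu_with_torch_cuda comes from diffusers' testing utilities and its implementation is not part of this diff. As a rough sketch of the kind of check it implies, assuming the threshold is a VRAM figure read from an environment variable (the BIG_GPU_MEMORY name and 40 GiB default below are assumptions, not taken from this commit), a skip-style decorator could look like this:

    import os
    import unittest

    import torch

    # Assumed env var and default; the real helper may use different values.
    BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", "40"))  # GiB


    def require_big_gpu_with_torch_cuda(test_case):
        """Skip the decorated test (or test class) unless a large CUDA GPU is available."""
        if not torch.cuda.is_available():
            return unittest.skip("test requires a CUDA GPU")(test_case)
        total_gib = torch.cuda.get_device_properties(0).total_memory / 1024**3
        if total_gib < BIG_GPU_MEMORY:
            return unittest.skip(
                f"test requires a GPU with at least {BIG_GPU_MEMORY} GiB of memory"
            )(test_case)
        return test_case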

tests/lora/test_lora_layers_sd3.py

Lines changed: 11 additions & 1 deletion
@@ -34,7 +34,12 @@
     require_peft_backend,
     require_torch_gpu,
     torch_device,
+    slow,
+    nightly,
+    require_big_gpu_with_torch_cuda,
+    print_tensor_test
 )
+import pytest
 
 
 if is_peft_available():

@@ -130,9 +135,13 @@ def test_modify_padding_mode(self):
         pass
 
 
+@slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
-class LoraSD3IntegrationTests(unittest.TestCase):
+@require_big_gpu_with_torch_cuda
+@pytest.mark.big_gpu_with_torch_cuda
+class SD3LoraIntegrationTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

@@ -173,6 +182,7 @@ def test_sd3_img2img_lora(self):
 
         image = pipe(**inputs).images[0]
         image_slice = image[0, :10, :10]
+        print_tensor_test(image[0, -3:, -3:, -1].flatten())
         expected_slice = np.array(
             [
                 0.47827148,
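Each integration class now carries both the hardware-gating decorator and a pytest.mark.big_gpu_with_torch_cuda marker, which presumably lets a dedicated big-GPU CI job select just these tests. As a sketch of how that selection might be wired up (only the marker name comes from the diff; the conftest registration and the command below are assumptions about the CI setup):

    # conftest.py (sketch): register the custom marker so pytest does not warn about it.
    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "big_gpu_with_torch_cuda: tests that need a large-memory CUDA GPU",
        )

    # Example invocation on the big-GPU runner (assumed):
    #   pytest -m big_gpu_with_torch_cuda tests/lora/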
