Commit d1715d3

integration tests

1 parent 79d023a
File tree

1 file changed: +61 -1 lines

tests/lora/test_lora_layers_flux.py

Lines changed: 61 additions & 1 deletion
@@ -19,19 +19,23 @@
 import unittest
 
 import numpy as np
+import pytest
 import safetensors.torch
 import torch
+from parameterized import parameterized
 from PIL import Image
 from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel
 
 from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel
-from diffusers.utils import logging
+from diffusers.utils import load_image, logging
 from diffusers.utils.testing_utils import (
     CaptureLogger,
     floats_tensor,
     is_peft_available,
     nightly,
     numpy_cosine_similarity_distance,
+    print_tensor_test,
+    require_big_gpu_with_torch_cuda,
     require_peft_backend,
     require_peft_version_greater,
     require_torch_gpu,
@@ -578,3 +582,59 @@ def test_flux_xlabs_load_lora_with_single_blocks(self):
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
 
         assert max_diff < 1e-3
+
+
+@nightly
+@require_torch_gpu
+@require_peft_backend
+@require_big_gpu_with_torch_cuda
+@pytest.mark.big_gpu_with_torch_cuda
+class FluxControlLoRAIntegrationTests(unittest.TestCase):
+    num_inference_steps = 10
+    seed = 0
+    prompt = "A robot made of exotic candies and chocolates of different kinds."
+
+    def setUp(self):
+        super().setUp()
+
+        gc.collect()
+        torch.cuda.empty_cache()
+
+        self.pipeline = FluxControlPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
+        ).to("cuda")
+
+    def tearDown(self):
+        super().tearDown()
+
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
+    def test_lora(self, lora_ckpt_id):
+        self.pipeline.load_lora_weights(lora_ckpt_id)
+
+        if "Canny" in lora_ckpt_id:
+            control_image = load_image(
+                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
+            )
+        else:
+            control_image = load_image(
+                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
+            )
+
+        image = self.pipeline(
+            prompt=self.prompt,
+            control_image=control_image,
+            height=1024,
+            width=1024,
+            num_inference_steps=50,
+            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
+            output_type="np",
+            generator=torch.manual_seed(self.seed),
+        ).images
+
+        out_slice = image[0, -3:, -3:, -1].flatten()
+        print_tensor_test(out_slice)
+
+        assert out_slice is None
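
Notes on the added test: the class is gated behind @nightly and @require_big_gpu_with_torch_cuda, so it only runs in nightly CI on a sufficiently large CUDA GPU. The final assert out_slice is None appears to be a deliberate placeholder that fails once an image is produced, presumably so the print_tensor_test output can be used to fill in reference slices in a follow-up. For running it locally, something along the lines of RUN_NIGHTLY=1 pytest tests/lora/test_lora_layers_flux.py -k FluxControlLoRAIntegrationTests should work, assuming the usual diffusers convention that the nightly decorator is controlled by the RUN_NIGHTLY environment variable.

For reference, here is a minimal sketch (not part of the commit) of what the test exercises outside the test harness: loading a Flux Control LoRA into FluxControlPipeline and running it with a control image. The model and LoRA IDs, prompt, control-image URL, and pipeline arguments are taken from the diff above; the output filename is illustrative only.

    # Standalone sketch of the Canny-control LoRA path exercised by the new test.
    import torch

    from diffusers import FluxControlPipeline
    from diffusers.utils import load_image

    pipeline = FluxControlPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")
    pipeline.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")

    control_image = load_image(
        "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
    )
    image = pipeline(
        prompt="A robot made of exotic candies and chocolates of different kinds.",
        control_image=control_image,
        height=1024,
        width=1024,
        num_inference_steps=50,
        guidance_scale=30.0,  # the test uses 30.0 for Canny and 10.0 for Depth
        generator=torch.manual_seed(0),
    ).images[0]
    image.save("flux_canny_lora.png")  # illustrative output path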
