@@ -825,3 +825,40 @@ def test_lora(self, lora_ckpt_id):
         max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
 
         assert max_diff < 1e-3
+
+    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
+    def test_lora_with_turbo(self, lora_ckpt_id):
+        self.pipeline.load_lora_weights(lora_ckpt_id)
+        self.pipeline.load_lora_weights("ByteDance/Hyper-SD", weight_name="Hyper-FLUX.1-dev-8steps-lora.safetensors")
+        self.pipeline.fuse_lora()
+        self.pipeline.unload_lora_weights()
+
+        if "Canny" in lora_ckpt_id:
+            control_image = load_image(
+                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
+            )
+        else:
+            control_image = load_image(
+                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
+            )
+
+        image = self.pipeline(
+            prompt=self.prompt,
+            control_image=control_image,
+            height=1024,
+            width=1024,
+            num_inference_steps=self.num_inference_steps,
+            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
+            output_type="np",
+            generator=torch.manual_seed(self.seed),
+        ).images
+
+        out_slice = image[0, -3:, -3:, -1].flatten()
+        if "Canny" in lora_ckpt_id:
+            expected_slice = np.array([0.6562, 0.7266, 0.7578, 0.6367, 0.6758, 0.7031, 0.6172, 0.6602, 0.6484])
+        else:
+            expected_slice = np.array([0.6680, 0.7344, 0.7656, 0.6484, 0.6875, 0.7109, 0.6328, 0.6719, 0.6562])
+
+        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
+
+        assert max_diff < 1e-3
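
The workflow this test exercises (stack a FLUX Control LoRA with the Hyper-SD "turbo" LoRA, fuse, then unload) can also be run outside the test harness. The sketch below is a minimal, non-authoritative example: it assumes the fixture's `self.pipeline` is a `FluxControlPipeline` built on `black-forest-labs/FLUX.1-dev`, and the prompt, device placement, and output filename are illustrative choices, not values taken from the test file.

```python
# Minimal sketch of the fused Control-LoRA + Hyper-SD turbo setup, under the
# assumptions stated above (FluxControlPipeline on FLUX.1-dev, CUDA device).
import torch
from diffusers import FluxControlPipeline
from diffusers.utils import load_image

pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Load the Canny control LoRA and the Hyper-SD 8-step LoRA, fuse both into the
# base weights, then drop the LoRA layers so inference runs on the fused model.
pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")
pipe.load_lora_weights("ByteDance/Hyper-SD", weight_name="Hyper-FLUX.1-dev-8steps-lora.safetensors")
pipe.fuse_lora()
pipe.unload_lora_weights()

control_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
)

image = pipe(
    prompt="A robot made of exotic candies",  # hypothetical prompt; the test's self.prompt is not shown in this hunk
    control_image=control_image,
    height=1024,
    width=1024,
    num_inference_steps=8,  # the Hyper-SD LoRA is distilled for ~8 steps
    guidance_scale=30.0,    # the test uses 30.0 for Canny and 10.0 for Depth
    generator=torch.manual_seed(0),
).images[0]
image.save("flux_canny_turbo.png")
```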