@@ -53,7 +53,7 @@
 
 
 @require_peft_backend
-class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class FluxLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
     pipeline_class = FluxPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler()
     scheduler_kwargs = {}
@@ -123,7 +123,7 @@ def test_with_alpha_in_state_dict(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)
 
         pipe.transformer.add_adapter(denoiser_lora_config)
@@ -171,7 +171,7 @@ def test_lora_expansion_works_for_absent_keys(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)
 
         # Modify the config to have a layer which won't be present in the second LoRA we will load.
@@ -220,7 +220,7 @@ def test_lora_expansion_works_for_extra_keys(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)
 
         # Modify the config to have a layer which won't be present in the first LoRA we will load.
@@ -280,7 +280,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass
 
 
-class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class FluxControlLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
     pipeline_class = FluxControlPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler()
     scheduler_kwargs = {}
@@ -357,7 +357,7 @@ def test_with_norm_in_state_dict(self):
         logger = logging.get_logger("diffusers.loaders.lora_pipeline")
         logger.setLevel(logging.INFO)
 
-        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        original_output = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
 
         for norm_layer in ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]:
             norm_state_dict = {}
@@ -643,7 +643,7 @@ def test_load_regular_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        original_output = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
 
         out_features, in_features = pipe.transformer.x_embedder.weight.shape
         rank = 4
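Every call site in this diff swaps a direct seeded pipeline invocation for a shared get_base_pipeline_output helper. The helper's definition is not part of these hunks; below is a minimal sketch of what such a mixin-level helper might look like. The signature, the three-tuple return of get_dummy_components, and the internal rebuild of the pipeline are assumptions inferred from the removed lines, not the actual implementation.

# Hypothetical sketch only -- the real helper is defined on
# PeftLoraLoaderMixinTests and may differ in signature and behavior.
import torch

from diffusers.utils.testing_utils import torch_device


class PeftLoraLoaderMixinTests:
    def get_base_pipeline_output(self, scheduler_cls):
        # Rebuild a fresh pipeline so the no-LoRA baseline is never
        # polluted by adapters attached earlier in a test.
        components, _, _ = self.get_dummy_components(scheduler_cls)
        pipe = self.pipeline_class(**components).to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        # Same fixed seed the removed call sites used, so baselines stay
        # comparable across tests. For these pipelines, .images is the
        # same object the old `[0]` indexing returned.
        return pipe(**inputs, generator=torch.manual_seed(0)).images

The reordered bases in the class definitions follow the same logic: with PeftLoraLoaderMixinTests listed before unittest.TestCase, Python's MRO resolves any method the mixin defines (setUp-style hooks, shared helpers) ahead of the TestCase defaults.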