
Commit e1f502f

Commit message: update
Parent: 9150ab0
File tree: 1 file changed (+7, −7 lines)


tests/lora/test_lora_layers_flux.py

Lines changed: 7 additions & 7 deletions
@@ -53,7 +53,7 @@


 @require_peft_backend
-class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class FluxLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
     pipeline_class = FluxPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler()
     scheduler_kwargs = {}
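
Note on the base-class reordering above: in Python the left-to-right order of base classes determines the MRO, so listing the shared mixin before unittest.TestCase lets the mixin's hooks and helpers (setUp, shared test methods) be found ahead of the TestCase defaults. Below is a minimal standalone sketch of that behavior; SharedMixinTests and ExampleTests are hypothetical names, not part of this repository.

import unittest


class SharedMixinTests:
    # Hypothetical mixin with a shared fixture, standing in for PeftLoraLoaderMixinTests.
    def setUp(self):
        self.base_value = 42   # shared state every inheriting test class gets
        super().setUp()        # cooperatively continue to unittest.TestCase.setUp


class ExampleTests(SharedMixinTests, unittest.TestCase):
    # Mixin listed first, so ExampleTests.__mro__ is
    # (ExampleTests, SharedMixinTests, unittest.TestCase, object)
    # and SharedMixinTests.setUp runs before every test method.
    def test_mixin_setup_ran(self):
        self.assertEqual(self.base_value, 42)


if __name__ == "__main__":
    unittest.main()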
@@ -123,7 +123,7 @@ def test_with_alpha_in_state_dict(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)

-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)

         pipe.transformer.add_adapter(denoiser_lora_config)
@@ -171,7 +171,7 @@ def test_lora_expansion_works_for_absent_keys(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)

-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)

         # Modify the config to have a layer which won't be present in the second LoRA we will load.
@@ -220,7 +220,7 @@ def test_lora_expansion_works_for_extra_keys(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)

-        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+        output_no_lora = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)
         self.assertTrue(output_no_lora.shape == self.output_shape)

         # Modify the config to have a layer which won't be present in the first LoRA we will load.
@@ -280,7 +280,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
         pass


-class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+class FluxControlLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
     pipeline_class = FluxControlPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler()
     scheduler_kwargs = {}
@@ -357,7 +357,7 @@ def test_with_norm_in_state_dict(self):
         logger = logging.get_logger("diffusers.loaders.lora_pipeline")
         logger.setLevel(logging.INFO)

-        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        original_output = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)

         for norm_layer in ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]:
             norm_state_dict = {}
@@ -643,7 +643,7 @@ def test_load_regular_lora(self):
         pipe.set_progress_bar_config(disable=None)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)

-        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        original_output = self.get_base_pipeline_output(FlowMatchEulerDiscreteScheduler)

         out_features, in_features = pipe.transformer.x_embedder.weight.shape
         rank = 4
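
The repeated hunks above swap a per-test pipe(**inputs, generator=torch.manual_seed(0)) call for a shared self.get_base_pipeline_output(...) helper. The helper's body is not shown in this diff; it is defined on the shared mixin (PeftLoraLoaderMixinTests). Purely as a hedged sketch of what a baseline helper of this shape might do, assuming the get_dummy_components / get_dummy_inputs / pipeline_class members this test file already relies on, it could look roughly like the following; the real implementation may differ.

import torch


class BaselineOutputSketch:
    # Hypothetical stand-in for the mixin method; names, arguments, and return
    # shape here are assumptions, not the actual diffusers test code.
    def get_base_pipeline_output(self, scheduler_cls):
        components, _, _ = self.get_dummy_components(scheduler_cls)
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        # Fixed seed so every test compares against the same no-LoRA baseline output.
        return pipe(**inputs, generator=torch.manual_seed(0))[0]

Centralizing the baseline call this way keeps the seed and scheduler choice consistent across tests instead of repeating slightly different invocations in each test method.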

0 commit comments