Commit b9a2670

Commit message: fix

1 parent 1d34541 commit b9a2670

File tree

1 file changed (+1, -16 lines)

tests/lora/test_lora_layers_flux.py

Lines changed: 1 addition & 16 deletions
@@ -340,21 +340,6 @@ def test_lora_parameter_expanded_shapes(self):
         self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
         self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))
 
-        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-        dummy_lora_A = torch.nn.Linear(1, rank, bias=False)
-        dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False)
-        lora_state_dict = {
-            "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
-            "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
-        }
-        # We should error out because lora input features is less than original. We only
-        # support expanding the module, not shrinking it
-        with self.assertRaises(RuntimeError):
-            pipe.load_lora_weights(lora_state_dict, "adapter-1")
-
     @require_peft_version_greater("0.13.2")
     def test_lora_B_bias(self):
         components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
@@ -486,7 +471,7 @@ def test_lora_expanding_shape_with_normal_lora(self):
 
         lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]
 
-        self.assertTrue("Found some LoRA modules for which the weights were zero-padded" in cap_logger.out)
+        self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out)
         self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3))
 
         # Test the opposite case where the first lora has the correct input features and the second lora has expanded input features.
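Note on the first hunk: the deleted block asserted that loading LoRA weights whose input features are smaller than the base module's raises a RuntimeError, since only expanding a module is supported, not shrinking it. A minimal, self-contained sketch of that shape constraint, using plain torch with hypothetical sizes rather than the diffusers loader:

import torch

# Hypothetical sizes for illustration; the removed test used Linear(1, rank).
rank, base_in_features = 4, 16
lora_A = torch.nn.Linear(1, rank, bias=False)  # LoRA trained against 1 input feature
x = torch.randn(2, base_in_features)           # but the base module sees 16

try:
    lora_A(x)  # (2, 16) against a (1 -> 4) layer: incompatible shapes
except RuntimeError as err:
    print(f"RuntimeError, as the removed test expected: {err}")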
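Note on the second hunk: only the expected log message changes, from "Found some LoRA modules for which the weights were zero-padded" to "The following LoRA modules were zero padded to match the state dict of". An illustrative sketch of the zero-padding idea that message refers to (not the diffusers implementation): a LoRA matrix trained against fewer input features gets zero columns appended so it can be applied to an expanded nn.Linear, and the padding contributes nothing to the output.

import torch

# Hypothetical sizes for illustration.
rank, in_features, expanded_in_features = 4, 4, 8
lora_A = torch.randn(rank, in_features)

# Zero pad the extra input columns so shapes match the expanded module.
padded = torch.zeros(rank, expanded_in_features)
padded[:, :in_features] = lora_A

assert torch.equal(padded[:, :in_features], lora_A)    # original weights intact
assert torch.all(padded[:, in_features:] == 0).item()  # padding is inert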
