[LoRA] feat: support loading regular Flux LoRAs into Flux Control, and Fill #10259
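For orientation, here is a minimal usage sketch of what this PR enables: loading a LoRA trained against the regular Flux.1-Dev transformer into a pipeline whose transformer has more input channels (Flux Control, Flux Fill). The checkpoint id is only an example of a Flux Control model and the LoRA repo id is a placeholder; neither is taken from this PR.

```python
# Illustrative sketch only; the LoRA repo id below is a placeholder.
import torch
from diffusers import FluxControlPipeline

pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev", torch_dtype=torch.bfloat16
).to("cuda")

# A regular LoRA trained for Flux.1-Dev targets an x_embedder with fewer input features than
# the Control transformer; with this change, its lora_A weights get zero-padded on load.
pipe.load_lora_weights("some-user/flux-dev-style-lora", adapter_name="style")
```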
Changes from 9 commits
@@ -430,10 +430,10 @@ def test_correct_lora_configs_with_different_ranks(self):
self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3))
self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3))

def test_lora_expanding_shape_with_normal_lora_raises_error(self):
# TODO: This test checks if an error is raised when a lora expands shapes (like control loras) but
# another lora with correct shapes is loaded. This is not supported at the moment and should raise an error.
# When we do support it, this test should be removed. Context: https://github.com/huggingface/diffusers/issues/10180
def test_lora_expanding_shape_with_normal_lora(self):
# This test checks if it works when a lora with expanded shapes (like control loras) is loaded first and
# another lora with correct shapes is loaded afterwards. The opposite direction isn't supported and is
# tested with it.
components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

# Change the transformer config to mimic a real use case.
@@ -478,21 +478,16 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):
"transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
}

# The first lora expanded the input features of x_embedder. Here, we are trying to load a lora with the correct
Review comment: So this error is now removed because expanding lora state dicts is now supported.
Reply: Exactly!
# input features before expansion. This should raise an error about the weight shapes being incompatible.
self.assertRaisesRegex(
RuntimeError,
"size mismatch for x_embedder.lora_A.adapter-2.weight",
pipe.load_lora_weights,
lora_state_dict,
"adapter-2",
)
# We should have `adapter-1` as the only adapter.
self.assertTrue(pipe.get_active_adapters() == ["adapter-1"])
with CaptureLogger(logger) as cap_logger:
pipe.load_lora_weights(lora_state_dict, "adapter-2")

self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
self.assertTrue(pipe.get_active_adapters() == ["adapter-2"])

lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

# Check if the output is the same after lora loading error
lora_output_after_error = pipe(**inputs, generator=torch.manual_seed(0))[0]
self.assertTrue(np.allclose(lora_output, lora_output_after_error, atol=1e-3, rtol=1e-3))
self.assertTrue("Found some LoRA modules for which the weights were zero-padded" in cap_logger.out)
self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3))

# Test the opposite case where the first lora has the correct input features and the second lora has expanded input features.
# This should raise a runtime error on input shapes being incompatible. But it doesn't. This is because PEFT renames the
@@ -524,8 +519,8 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):

with CaptureLogger(logger) as cap_logger:
pipe.load_lora_weights(lora_state_dict, "adapter-1")
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features)
self.assertTrue(pipe.transformer.config.in_channels == in_features)
self.assertFalse(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))
@@ -535,17 +530,107 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):
"transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
}

# We should check for input shapes being incompatible here. But because above mentioned issue is
# not a supported use case, and because of the PEFT renaming, we will currently have a shape
# mismatch error.
# We should check for input shapes being incompatible here.
self.assertRaisesRegex(
RuntimeError,
"size mismatch for x_embedder.lora_A.adapter-2.weight",
"x_embedder.lora_A.weight",
pipe.load_lora_weights,
lora_state_dict,
"adapter-2",
)

def test_fuse_expanded_lora_with_regular_lora(self):
# This test checks if it works when a lora with expanded shapes (like control loras) is loaded first and
# another lora with correct shapes is loaded afterwards. The opposite direction isn't supported and is
# tested with it.
components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

# Change the transformer config to mimic a real use case.
num_channels_without_control = 4
transformer = FluxTransformer2DModel.from_config(
components["transformer"].config, in_channels=num_channels_without_control
).to(torch_device)
components["transformer"] = transformer

pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)

logger = logging.get_logger("diffusers.loaders.lora_pipeline")
logger.setLevel(logging.DEBUG)

out_features, in_features = pipe.transformer.x_embedder.weight.shape
rank = 4

shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False)
lora_state_dict = {
"transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
"transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
}
pipe.load_lora_weights(lora_state_dict, "adapter-1")
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

_, _, inputs = self.get_dummy_inputs(with_generator=False)
lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
lora_state_dict = {
"transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
"transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
}

pipe.load_lora_weights(lora_state_dict, "adapter-2")
Review comment: Very cool that this would now be possible! 🔥
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

pipe.set_adapters(["adapter-1", "adapter-2"], [1.0, 1.0])
lora_output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0]

self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3))
self.assertFalse(np.allclose(lora_output, lora_output_3, atol=1e-3, rtol=1e-3))
self.assertFalse(np.allclose(lora_output_2, lora_output_3, atol=1e-3, rtol=1e-3))

pipe.fuse_lora(lora_scale=1.0, adapter_names=["adapter-1", "adapter-2"])
lora_output_4 = pipe(**inputs, generator=torch.manual_seed(0))[0]
self.assertTrue(np.allclose(lora_output_3, lora_output_4, atol=1e-3, rtol=1e-3))

def test_load_regular_lora(self):
# This test checks if a regular lora (think of one trained on Flux.1 Dev, for example) can be loaded
# into the transformer with more input channels than Flux.1 Dev, for example. Some examples of those
# transformers include Flux Fill, Flux Control, etc.
components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)

original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

out_features, in_features = pipe.transformer.x_embedder.weight.shape
rank = 4
in_features = in_features // 2  # to mimic the Flux.1-Dev LoRA.
normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
lora_state_dict = {
"transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
"transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
}

logger = logging.get_logger("diffusers.loaders.lora_pipeline")
logger.setLevel(logging.INFO)
with CaptureLogger(logger) as cap_logger:
pipe.load_lora_weights(lora_state_dict, "adapter-1")
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

self.assertTrue("Found some LoRA modules for which the weights were zero-padded" in cap_logger.out)
self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2)
self.assertFalse(np.allclose(original_output, lora_output, atol=1e-3, rtol=1e-3))

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

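The zero-padding behavior asserted via the captured log message above ("Found some LoRA modules for which the weights were zero-padded") can be pictured with a small standalone sketch. This is not the diffusers implementation, only an illustration of the idea under the assumption that just the LoRA A matrix's input dimension needs to grow; the function name is made up.

```python
import torch


def zero_pad_lora_A(lora_A_weight: torch.Tensor, expanded_in_features: int) -> torch.Tensor:
    """Illustrative only: pad a (rank, in_features) LoRA A weight with zeros so it accepts
    `expanded_in_features` inputs. The extra input channels then contribute nothing to the
    LoRA update, which is what the zero-padding log message in the tests refers to."""
    rank, in_features = lora_A_weight.shape
    if expanded_in_features < in_features:
        raise ValueError("Cannot shrink a LoRA A weight; only expansion is supported.")
    padded = lora_A_weight.new_zeros(rank, expanded_in_features)
    padded[:, :in_features] = lora_A_weight
    return padded


# e.g. a Flux.1-Dev-style LoRA A (in_features=64) loaded into a transformer expecting 128 inputs
lora_A = torch.randn(4, 64)
print(zero_pad_lora_A(lora_A, 128).shape)  # torch.Size([4, 128])
```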