Commit 38dedc7

support Flux Control LoRA with bnb 8bit.

1 parent c934720

File tree

2 files changed: +55 -2 lines
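
For context, here is a minimal usage sketch of what this commit enables, assembled from the model IDs and quantization config in the new test below. The prompt, control image, and step count are illustrative placeholders, not part of the commit:

```python
import torch
from diffusers import FluxControlPipeline
from diffusers.quantizers import PipelineQuantizationConfig
from PIL import Image

# Quantize the transformer and the T5 text encoder to bnb 8-bit on load.
pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    quantization_config=PipelineQuantizationConfig(
        quant_backend="bitsandbytes_8bit",
        quant_kwargs={"load_in_8bit": True},
        components_to_quantize=["transformer", "text_encoder_2"],
    ),
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()

# Before this commit, loading a Flux Control LoRA onto the 8-bit transformer
# failed; the loader now dequantizes Int8Params weights before expanding them.
pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")

image = pipe(
    prompt="a robot holding a sign",             # illustrative prompt
    control_image=Image.new("RGB", (256, 256)),  # stand-in for a real canny map
    height=256,
    width=256,
    num_inference_steps=8,
).images[0]
```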

src/diffusers/loaders/lora_pipeline.py

Lines changed: 7 additions & 2 deletions
@@ -81,12 +81,17 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
         from ..quantizers.gguf.utils import dequantize_gguf_tensor
 
     is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
+    is_bnb_8bit_quantized = module.weight.__class__.__name__ == "Int8Params"
     is_gguf_quantized = module.weight.__class__.__name__ == "GGUFParameter"
 
     if is_bnb_4bit_quantized and not is_bitsandbytes_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints."
         )
+    if is_bnb_8bit_quantized and not is_bitsandbytes_available():
+        raise ValueError(
+            "The checkpoint seems to have been quantized with `bitsandbytes` (8bits). Install `bitsandbytes` to load quantized checkpoints."
+        )
     if is_gguf_quantized and not is_gguf_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `gguf`. Install `gguf` to load quantized checkpoints."

@@ -97,10 +102,10 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
         weight_on_cpu = True
 
     device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
-    if is_bnb_4bit_quantized:
+    if is_bnb_4bit_quantized or is_bnb_8bit_quantized:
         module_weight = dequantize_bnb_weight(
             module.weight.to(device) if weight_on_cpu else module.weight,
-            state=module.weight.quant_state,
+            state=module.weight.quant_state if is_bnb_4bit_quantized else module.state,
             dtype=model.dtype,
         ).data
     elif is_gguf_quantized:
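
Why dequantize at all: Flux Control LoRAs widen the transformer's input projection (the control latents double the input channels), and an expanded shape cannot be written into a packed quantized weight, so the loader dequantizes to `model.dtype` first. The one wrinkle the second hunk handles is where bitsandbytes keeps the state needed for dequantization, which differs between the 4-bit and 8-bit layers. A small sketch, assuming `bitsandbytes` is installed (layer sizes are arbitrary):

```python
import bitsandbytes as bnb

linear_4bit = bnb.nn.Linear4bit(64, 64)
linear_8bit = bnb.nn.Linear8bitLt(64, 64, has_fp16_weights=False)

# These weight class names are exactly what the loader above checks.
print(linear_4bit.weight.__class__.__name__)  # Params4bit
print(linear_8bit.weight.__class__.__name__)  # Int8Params

# 4-bit: the quant state hangs off the weight itself (populated once the
# layer is quantized), hence `state=module.weight.quant_state`.
print(hasattr(linear_4bit.weight, "quant_state"))  # True
# 8-bit: a MatmulLtState lives on the Linear8bitLt module, hence `state=module.state`.
print(hasattr(linear_8bit, "state"))  # True
```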

tests/quantization/bnb/test_mixed_int8.py

Lines changed: 48 additions & 0 deletions
@@ -19,15 +19,18 @@
 import numpy as np
 import pytest
 from huggingface_hub import hf_hub_download
+from PIL import Image
 
 from diffusers import (
     BitsAndBytesConfig,
     DiffusionPipeline,
+    FluxControlPipeline,
     FluxTransformer2DModel,
     SanaTransformer2DModel,
     SD3Transformer2DModel,
     logging,
 )
+from diffusers.quantizers import PipelineQuantizationConfig
 from diffusers.utils import is_accelerate_version
 from diffusers.utils.testing_utils import (
     CaptureLogger,

@@ -39,6 +42,7 @@
     numpy_cosine_similarity_distance,
     require_accelerate,
     require_bitsandbytes_version_greater,
+    require_peft_backend,
     require_peft_version_greater,
     require_torch,
     require_torch_accelerator,
@@ -696,6 +700,50 @@ def test_lora_loading(self):
         self.assertTrue(max_diff < 1e-3)
 
 
+@require_transformers_version_greater("4.44.0")
+@require_peft_backend
+class SlowBnb8bitFluxControlWithLoraTests(Base8bitTests):
+    def setUp(self) -> None:
+        gc.collect()
+        backend_empty_cache(torch_device)
+
+        self.pipeline_8bit = FluxControlPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-dev",
+            quantization_config=PipelineQuantizationConfig(
+                quant_backend="bitsandbytes_8bit",
+                quant_kwargs={"load_in_8bit": True},
+                components_to_quantize=["transformer", "text_encoder_2"],
+            ),
+            torch_dtype=torch.float16,
+        )
+        self.pipeline_8bit.enable_model_cpu_offload()
+
+    def tearDown(self):
+        del self.pipeline_8bit
+
+        gc.collect()
+        backend_empty_cache(torch_device)
+
+    def test_lora_loading(self):
+        self.pipeline_8bit.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")
+
+        output = self.pipeline_8bit(
+            prompt=self.prompt,
+            control_image=Image.new(mode="RGB", size=(256, 256)),
+            height=256,
+            width=256,
+            max_sequence_length=64,
+            output_type="np",
+            num_inference_steps=8,
+            generator=torch.Generator().manual_seed(42),
+        ).images
+        out_slice = output[0, -3:, -3:, -1].flatten()
+        expected_slice = np.array([0.2029, 0.2136, 0.2268, 0.1921, 0.1997, 0.2185, 0.2021, 0.2183, 0.2292])
+
+        max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
+        self.assertTrue(max_diff < 1e-3, msg=f"{out_slice=} != {expected_slice=}")
+
+
 @slow
 class BaseBnb8bitSerializationTests(Base8bitTests):
     def setUp(self):
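
The assertion at the end compares a 3x3 corner slice of the generated image against recorded values using a cosine distance. A plain-numpy sketch of the equivalent check (diffusers' `numpy_cosine_similarity_distance` computes essentially this), with the `actual` slice faked for illustration:

```python
import numpy as np

def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 1 - cosine similarity: 0.0 when the two slices point the same way.
    return 1.0 - float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

expected = np.array([0.2029, 0.2136, 0.2268, 0.1921, 0.1997, 0.2185, 0.2021, 0.2183, 0.2292])
actual = expected + 1e-4  # stand-in for a real pipeline output slice

assert cosine_similarity_distance(expected, actual) < 1e-3
```

A direction-based tolerance like this tends to be less brittle across GPUs and quantization backends than an element-wise max-diff, which is presumably why the quantization tests use it.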
