Skip to content

Commit ac4f61a

Browse files
committed
make style; make quality; make fix-copies
1 parent 05fcddb commit ac4f61a

File tree

2 files changed

+16
-3
lines changed

2 files changed

+16
-3
lines changed

src/diffusers/utils/dummy_pt_objects.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -843,6 +843,21 @@ def from_pretrained(cls, *args, **kwargs):
843843
requires_backends(cls, ["torch"])
844844

845845

846+
class Flux2Transformer2DModel(metaclass=DummyObject):
    """Import-time placeholder for ``Flux2Transformer2DModel`` when PyTorch is absent.

    This dummy is auto-generated (see the commit's ``make fix-copies``) so the
    name stays importable without ``torch`` installed; every entry point below
    defers to ``requires_backends``, which raises with an informative message
    about the missing backend instead of failing with an ImportError at import
    time.
    """

    # Backends that must be installed before the real class can be used.
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Instantiation is refused until the "torch" backend is available.
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # Mirrors the real model's alternate constructor; raises if torch is missing.
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Mirrors the real model's checkpoint loader; raises if torch is missing.
        requires_backends(cls, ["torch"])
859+
860+
846861
class FluxControlNetModel(metaclass=DummyObject):
847862
_backends = ["torch"]
848863

tests/models/transformers/test_models_transformer_flux2.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -212,9 +212,7 @@ def test_lora_exclude_modules(self):
212212
f"{target_module}.lora_B.weight": torch.ones(target_mod_shape[0], lora_rank) * 33,
213213
}
214214
# Passing exclude_modules should no longer be necessary (or even passing target_modules, for that matter).
215-
config = LoraConfig(
216-
r=lora_rank, target_modules=[target_module], exclude_modules=["to_out"]
217-
)
215+
config = LoraConfig(r=lora_rank, target_modules=[target_module], exclude_modules=["to_out"])
218216
inject_adapter_in_model(config, model, adapter_name=adapter_name, state_dict=lora_state_dict)
219217
set_peft_model_state_dict(model, lora_state_dict, adapter_name)
220218
retrieved_lora_state_dict = get_peft_model_state_dict(model, adapter_name=adapter_name)

0 commit comments

Comments
 (0)