Commit 8801ea1

Merge branch 'main' into fix_syntax_qwenimageEditPlus

2 parents: 71b89a3 + 043ab25

23 files changed: +1083 −1162 lines changed

src/diffusers/models/auto_model.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -194,7 +194,7 @@ def from_pretrained(cls, pretrained_model_or_path: Optional[Union[str, os.PathLi
 
         has_remote_code = "auto_map" in config and cls.__name__ in config["auto_map"]
         trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_or_path, has_remote_code)
-        if not (has_remote_code and trust_remote_code):
+        if not has_remote_code and trust_remote_code:
             raise ValueError(
                 "Selected model repository does not happear to have any custom code or does not have a valid `config.json` file."
             )
```
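The guard's polarity changes here: the old predicate raised unless both flags were true; the new one raises only when `trust_remote_code` is requested for a repository that has no custom code. A minimal standalone sketch (plain Python, not diffusers code) contrasting the two:

```python
# Truth table for the old vs. new guard from the diff above.
for has_remote_code in (True, False):
    for trust_remote_code in (True, False):
        old = not (has_remote_code and trust_remote_code)  # raised unless both were True
        new = not has_remote_code and trust_remote_code    # raises only: trust requested, no code
        print(f"has_code={has_remote_code} trust={trust_remote_code} old_raises={old} new_raises={new}")
```

Notably, loading a repository with neither custom code nor `trust_remote_code` set no longer raises under the new condition.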

src/diffusers/modular_pipelines/components_manager.py

Lines changed: 10 additions & 3 deletions

```diff
@@ -25,6 +25,7 @@
     is_accelerate_available,
     logging,
 )
+from ..utils.torch_utils import get_device
 
 
 if is_accelerate_available():
@@ -161,7 +162,9 @@ def __call__(self, hooks, model_id, model, execution_device):
 
         current_module_size = model.get_memory_footprint()
 
-        mem_on_device = torch.cuda.mem_get_info(execution_device.index)[0]
+        device_type = execution_device.type
+        device_module = getattr(torch, device_type, torch.cuda)
+        mem_on_device = device_module.mem_get_info(execution_device.index)[0]
         mem_on_device = mem_on_device - self.memory_reserve_margin
         if current_module_size < mem_on_device:
             return []
@@ -301,7 +304,7 @@ class ComponentsManager:
         cm.add("vae", vae_model, collection="sdxl")
 
         # Enable auto offloading
-        cm.enable_auto_cpu_offload(device="cuda")
+        cm.enable_auto_cpu_offload()
 
         # Retrieve components
         unet = cm.get_one(name="unet", collection="sdxl")
@@ -490,6 +493,8 @@ def remove(self, component_id: str = None):
         gc.collect()
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
+        if torch.xpu.is_available():
+            torch.xpu.empty_cache()
 
     # YiYi TODO: rename to search_components for now, may remove this method
     def search_components(
@@ -678,7 +683,7 @@ def matches_pattern(component_id, pattern, exact_match=False):
 
         return get_return_dict(matches, return_dict_with_names)
 
-    def enable_auto_cpu_offload(self, device: Union[str, int, torch.device] = "cuda", memory_reserve_margin="3GB"):
+    def enable_auto_cpu_offload(self, device: Union[str, int, torch.device] = None, memory_reserve_margin="3GB"):
         """
         Enable automatic CPU offloading for all components.
 
@@ -704,6 +709,8 @@ def enable_auto_cpu_offload(self, device: Union[str, int, torch.device] = "cuda"
 
         self.disable_auto_cpu_offload()
         offload_strategy = AutoOffloadStrategy(memory_reserve_margin=memory_reserve_margin)
+        if device is None:
+            device = get_device()
         device = torch.device(device)
         if device.index is None:
             device = torch.device(f"{device.type}:{0}")
```
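These hunks generalize the CUDA-only memory query and cache cleanup to other accelerator backends. A minimal sketch of the lookup pattern, assuming the resolved backend module (`torch.cuda`, `torch.xpu`, ...) exposes `mem_get_info()` the way `torch.cuda` does:

```python
import torch

def free_memory_bytes(execution_device: torch.device) -> int:
    # Resolve the backend module from the device type, falling back to
    # torch.cuda as the diff above does.
    device_module = getattr(torch, execution_device.type, torch.cuda)
    free_bytes, _total_bytes = device_module.mem_get_info(execution_device.index)
    return free_bytes

if torch.cuda.is_available():
    print(free_memory_bytes(torch.device("cuda:0")))
```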

src/diffusers/modular_pipelines/modular_pipeline.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -323,7 +323,7 @@ def from_pretrained(
         trust_remote_code = resolve_trust_remote_code(
             trust_remote_code, pretrained_model_name_or_path, has_remote_code
         )
-        if not (has_remote_code and trust_remote_code):
+        if not has_remote_code and trust_remote_code:
             raise ValueError(
                 "Selected model repository does not happear to have any custom code or does not have a valid `config.json` file."
             )
```

src/diffusers/pipelines/wan/pipeline_wan_vace.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -795,7 +795,7 @@ def __call__(
         callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
 
         # Simplification of implementation for now
-        if not isinstance(prompt, str):
+        if prompt is not None and not isinstance(prompt, str):
             raise ValueError("Passing a list of prompts is not yet supported. This may be supported in the future.")
         if num_videos_per_prompt != 1:
             raise ValueError(
```
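The relaxed check allows `prompt=None` (e.g. when callers supply precomputed embeddings) while still rejecting lists. A standalone sketch of the before/after behavior (hypothetical helper, not the pipeline itself):

```python
def check_prompt(prompt):
    # After the diff: None passes; non-string values such as lists still raise.
    if prompt is not None and not isinstance(prompt, str):
        raise ValueError("Passing a list of prompts is not yet supported. This may be supported in the future.")

check_prompt("a cat surfing")        # ok
check_prompt(None)                   # ok after the diff; raised before
try:
    check_prompt(["a cat", "a dog"])
except ValueError as err:
    print(err)                       # still rejected
```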

tests/lora/test_lora_layers_auraflow.py

Lines changed: 0 additions & 1 deletion

```diff
@@ -43,7 +43,6 @@
 class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = AuraFlowPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     scheduler_kwargs = {}
 
     transformer_kwargs = {
```

tests/lora/test_lora_layers_cogvideox.py

Lines changed: 0 additions & 2 deletions

```diff
@@ -21,7 +21,6 @@
 
 from diffusers import (
     AutoencoderKLCogVideoX,
-    CogVideoXDDIMScheduler,
     CogVideoXDPMScheduler,
     CogVideoXPipeline,
     CogVideoXTransformer3DModel,
@@ -44,7 +43,6 @@ class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = CogVideoXPipeline
     scheduler_cls = CogVideoXDPMScheduler
     scheduler_kwargs = {"timestep_spacing": "trailing"}
-    scheduler_classes = [CogVideoXDDIMScheduler, CogVideoXDPMScheduler]
 
     transformer_kwargs = {
         "num_attention_heads": 4,
```

tests/lora/test_lora_layers_cogview4.py

Lines changed: 17 additions & 19 deletions

```diff
@@ -50,7 +50,6 @@ def from_pretrained(*args, **kwargs):
 class CogView4LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = CogView4Pipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     scheduler_kwargs = {}
 
     transformer_kwargs = {
@@ -124,30 +123,29 @@ def test_simple_inference_save_pretrained(self):
         """
         Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
         """
-        for scheduler_cls in self.scheduler_classes:
-            components, _, _ = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
+        components, _, _ = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
 
-            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
-            self.assertTrue(output_no_lora.shape == self.output_shape)
+        output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        self.assertTrue(output_no_lora.shape == self.output_shape)
 
-            images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
 
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pipe.save_pretrained(tmpdirname)
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            pipe.save_pretrained(tmpdirname)
 
-                pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
-                pipe_from_pretrained.to(torch_device)
+            pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
+            pipe_from_pretrained.to(torch_device)
 
-                images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]
+            images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]
 
-            self.assertTrue(
-                np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
-                "Loading from saved checkpoints should give same results.",
-            )
+        self.assertTrue(
+            np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
+            "Loading from saved checkpoints should give same results.",
+        )
 
     @parameterized.expand([("block_level", True), ("leaf_level", False)])
     @require_torch_accelerator
```
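Together with the attribute removals in the other test files, this refactor drops the per-test loop over `scheduler_classes` in favor of the single `scheduler_cls` attribute. A toy sketch of the resulting pattern (hypothetical names, not the real mixin):

```python
class ToyScheduler:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class ToyLoRATests:
    scheduler_cls = ToyScheduler  # single class attribute, as after the diff
    scheduler_kwargs = {}

    def get_dummy_components(self):
        # Before the refactor, a scheduler class was threaded in by a
        # `for scheduler_cls in self.scheduler_classes:` loop in each test.
        return {"scheduler": self.scheduler_cls(**self.scheduler_kwargs)}

print(ToyLoRATests().get_dummy_components())
```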

tests/lora/test_lora_layers_flux.py

Lines changed: 2 additions & 4 deletions

```diff
@@ -55,9 +55,8 @@
 @require_peft_backend
 class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = FluxPipeline
-    scheduler_cls = FlowMatchEulerDiscreteScheduler()
+    scheduler_cls = FlowMatchEulerDiscreteScheduler
     scheduler_kwargs = {}
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     transformer_kwargs = {
         "patch_size": 1,
         "in_channels": 4,
@@ -282,9 +281,8 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
 
 class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = FluxControlPipeline
-    scheduler_cls = FlowMatchEulerDiscreteScheduler()
+    scheduler_cls = FlowMatchEulerDiscreteScheduler
     scheduler_kwargs = {}
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     transformer_kwargs = {
         "patch_size": 1,
         "in_channels": 8,
```

tests/lora/test_lora_layers_hunyuanvideo.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -51,7 +51,6 @@
 class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = HunyuanVideoPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     scheduler_kwargs = {}
 
     transformer_kwargs = {
@@ -254,6 +253,7 @@ def test_original_format_cseti(self):
         expected_slices = Expectations(
             {
                 ("cuda", 7): np.array([0.1013, 0.1924, 0.0078, 0.1021, 0.1929, 0.0078, 0.1023, 0.1919, 0.7402, 0.104, 0.4482, 0.7354, 0.0925, 0.4382, 0.7275, 0.0815]),
+                ("xpu", 3): np.array([0.1013, 0.1924, 0.0078, 0.1021, 0.1929, 0.0078, 0.1023, 0.1919, 0.7402, 0.104, 0.4482, 0.7354, 0.0925, 0.4382, 0.7275, 0.0815]),
             }
         )
         # fmt: on
```
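The new `("xpu", 3)` entry registers an XPU baseline alongside the CUDA slice. A toy sketch of device-keyed expected values (a plain dict, not the real `Expectations` helper; slices truncated here):

```python
import numpy as np

# Expected output slices keyed by (device_type, major_version).
expected_slices = {
    ("cuda", 7): np.array([0.1013, 0.1924, 0.0078]),
    ("xpu", 3): np.array([0.1013, 0.1924, 0.0078]),
}

def expectation_for(device_type: str, major_version: int) -> np.ndarray:
    # Assumption: exact-match lookup; the real helper may apply fallbacks.
    return expected_slices[(device_type, major_version)]

print(expectation_for("xpu", 3))
```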

tests/lora/test_lora_layers_ltx_video.py

Lines changed: 0 additions & 1 deletion

```diff
@@ -37,7 +37,6 @@
 class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     pipeline_class = LTXPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
-    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     scheduler_kwargs = {}
 
     transformer_kwargs = {
```
