
Commit fb8ec95

Merge branch 'main' into quant-compile-tests
2 parents: d44a29d + 33e636c

35 files changed: +206 −103 lines

src/diffusers/loaders/lora_pipeline.py

Lines changed: 7 additions & 2 deletions
@@ -81,12 +81,17 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
     from ..quantizers.gguf.utils import dequantize_gguf_tensor

     is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
+    is_bnb_8bit_quantized = module.weight.__class__.__name__ == "Int8Params"
     is_gguf_quantized = module.weight.__class__.__name__ == "GGUFParameter"

     if is_bnb_4bit_quantized and not is_bitsandbytes_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints."
         )
+    if is_bnb_8bit_quantized and not is_bitsandbytes_available():
+        raise ValueError(
+            "The checkpoint seems to have been quantized with `bitsandbytes` (8bits). Install `bitsandbytes` to load quantized checkpoints."
+        )
     if is_gguf_quantized and not is_gguf_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `gguf`. Install `gguf` to load quantized checkpoints."
@@ -97,10 +102,10 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
         weight_on_cpu = True

     device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
-    if is_bnb_4bit_quantized:
+    if is_bnb_4bit_quantized or is_bnb_8bit_quantized:
         module_weight = dequantize_bnb_weight(
             module.weight.to(device) if weight_on_cpu else module.weight,
-            state=module.weight.quant_state,
+            state=module.weight.quant_state if is_bnb_4bit_quantized else module.state,
             dtype=model.dtype,
         ).data
     elif is_gguf_quantized:
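For context, a minimal sketch (not the diffusers implementation) of the backend-detection idea this hunk extends: the quantization format of a layer can be inferred from the class of its weight parameter, with Params4bit/Int8Params coming from bitsandbytes and GGUFParameter from diffusers' GGUF support. Names here are illustrative only.

from typing import Optional

# Minimal sketch, assuming only that the listed parameter class names are the
# ones referenced in the hunk above.
def detect_quant_backend(module) -> Optional[str]:
    weight_cls = module.weight.__class__.__name__
    if weight_cls == "Params4bit":
        return "bnb-4bit"   # bitsandbytes 4-bit quantization
    if weight_cls == "Int8Params":
        return "bnb-8bit"   # bitsandbytes 8-bit (LLM.int8) quantization
    if weight_cls == "GGUFParameter":
        return "gguf"       # GGUF-quantized checkpoint
    return None             # plain, unquantized weight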

src/diffusers/pipelines/flux/pipeline_flux_inpaint.py

Lines changed: 5 additions & 0 deletions
@@ -1193,6 +1193,11 @@ def __call__(
             image = self.vae.decode(latents, return_dict=False)[0]
             image = self.image_processor.postprocess(image, output_type=output_type)

+            if padding_mask_crop is not None:
+                image = [
+                    self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image
+                ]
+
         # Offload all models
         self.maybe_free_model_hooks()
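The new block pastes the inpainted result back onto the uncropped original when padding_mask_crop is used. As a rough illustration of what an overlay step like this amounts to, here is a hypothetical PIL-only helper (not the diffusers apply_overlay implementation; function and parameter names are made up):

from PIL import Image

# Hypothetical sketch: paste the generated crop back into the original image,
# blended through the inpainting mask so only the masked region is replaced.
def paste_inpainted_crop(original: Image.Image, generated: Image.Image,
                         mask: Image.Image, crop_box: tuple) -> Image.Image:
    x1, y1, x2, y2 = crop_box
    result = original.copy()
    patch = generated.resize((x2 - x1, y2 - y1))
    mask_patch = mask.crop(crop_box).resize((x2 - x1, y2 - y1)).convert("L")
    result.paste(patch, (x1, y1), mask_patch)  # the mask controls per-pixel blending
    return result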

src/diffusers/pipelines/wan/pipeline_wan_video2video.py

Lines changed: 2 additions & 7 deletions
@@ -419,12 +419,7 @@ def prepare_latents(
         )

         if latents is None:
-            if isinstance(generator, list):
-                init_latents = [
-                    retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
-                ]
-            else:
-                init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video]
+            init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), sample_mode="argmax") for vid in video]

             init_latents = torch.cat(init_latents, dim=0).to(dtype)

@@ -441,7 +436,7 @@ def prepare_latents(
             if hasattr(self.scheduler, "add_noise"):
                 latents = self.scheduler.add_noise(init_latents, noise, timestep)
             else:
-                latents = self.scheduelr.scale_noise(init_latents, timestep, noise)
+                latents = self.scheduler.scale_noise(init_latents, timestep, noise)
         else:
             latents = latents.to(device)
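The rewritten branch encodes the video with the deterministic mode of the VAE posterior (sample_mode="argmax") instead of drawing per-generator samples. Below is a sketch of a retrieve_latents-style helper that is consistent with how the call sites above use it; the exact upstream body may differ.

# Sketch consistent with the call sites above (generator-based sampling vs. an
# "argmax"/mode path); not necessarily the exact upstream helper.
def retrieve_latents(encoder_output, generator=None, sample_mode="sample"):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)  # stochastic draw
    if hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()              # deterministic mode
    if hasattr(encoder_output, "latents"):
        return encoder_output.latents
    raise AttributeError("Could not access latents of provided encoder_output")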

src/diffusers/quantizers/quantization_config.py

Lines changed: 12 additions & 10 deletions
@@ -493,7 +493,7 @@ def __init__(self, quant_type: str, modules_to_not_convert: Optional[List[str]]
         TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method()
         if self.quant_type not in TORCHAO_QUANT_TYPE_METHODS.keys():
             is_floating_quant_type = self.quant_type.startswith("float") or self.quant_type.startswith("fp")
-            if is_floating_quant_type and not self._is_cuda_capability_atleast_8_9():
+            if is_floating_quant_type and not self._is_xpu_or_cuda_capability_atleast_8_9():
                 raise ValueError(
                     f"Requested quantization type: {self.quant_type} is not supported on GPUs with CUDA capability <= 8.9. You "
                     f"can check the CUDA capability of your GPU using `torch.cuda.get_device_capability()`."
@@ -645,7 +645,7 @@ def generate_fpx_quantization_types(bits: int):
            QUANTIZATION_TYPES.update(INT8_QUANTIZATION_TYPES)
            QUANTIZATION_TYPES.update(UINTX_QUANTIZATION_DTYPES)

-            if cls._is_cuda_capability_atleast_8_9():
+            if cls._is_xpu_or_cuda_capability_atleast_8_9():
                QUANTIZATION_TYPES.update(FLOATX_QUANTIZATION_TYPES)

            return QUANTIZATION_TYPES
@@ -655,14 +655,16 @@ def generate_fpx_quantization_types(bits: int):
        )

    @staticmethod
-    def _is_cuda_capability_atleast_8_9() -> bool:
-        if not torch.cuda.is_available():
-            raise RuntimeError("TorchAO requires a CUDA compatible GPU and installation of PyTorch.")
-
-        major, minor = torch.cuda.get_device_capability()
-        if major == 8:
-            return minor >= 9
-        return major >= 9
+    def _is_xpu_or_cuda_capability_atleast_8_9() -> bool:
+        if torch.cuda.is_available():
+            major, minor = torch.cuda.get_device_capability()
+            if major == 8:
+                return minor >= 9
+            return major >= 9
+        elif torch.xpu.is_available():
+            return True
+        else:
+            raise RuntimeError("TorchAO requires a CUDA compatible GPU or Intel XPU and installation of PyTorch.")

    def get_apply_tensor_subclass(self):
        TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method()
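A standalone sketch of the hardware gate introduced here: float8-style torchao quant types need CUDA compute capability 8.9 or newer, and the check now also accepts Intel XPU devices. Unlike the static method above, this illustrative version returns False instead of raising when neither backend is present; the function name is made up.

import torch

# Standalone sketch of the capability gate (returns False rather than raising
# when no CUDA/XPU device is present, unlike the method above).
def supports_float8_torchao() -> bool:
    if torch.cuda.is_available():
        major, minor = torch.cuda.get_device_capability()
        return (major, minor) >= (8, 9)   # e.g. Ada (8.9) or Hopper (9.0)
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return True                       # Intel XPU path added in this change
    return False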

src/diffusers/utils/dynamic_modules_utils.py

Lines changed: 21 additions & 3 deletions
@@ -154,12 +154,30 @@ def check_imports(filename):
     return get_relative_imports(filename)


-def get_class_in_module(class_name, module_path):
+def get_class_in_module(class_name, module_path, pretrained_model_name_or_path=None):
     """
     Import a module on the cache directory for modules and extract a class from it.
     """
     module_path = module_path.replace(os.path.sep, ".")
-    module = importlib.import_module(module_path)
+    try:
+        module = importlib.import_module(module_path)
+    except ModuleNotFoundError as e:
+        # This can happen when the repo id contains ".", which Python's import machinery interprets as a directory
+        # separator. We do a bit of monkey patching to detect and fix this case.
+        if not (
+            pretrained_model_name_or_path is not None
+            and "." in pretrained_model_name_or_path
+            and module_path.startswith("diffusers_modules")
+            and pretrained_model_name_or_path.replace("/", "--") in module_path
+        ):
+            raise e  # We can't figure this one out, just reraise the original error
+
+        corrected_path = os.path.join(HF_MODULES_CACHE, module_path.replace(".", "/")) + ".py"
+        corrected_path = corrected_path.replace(
+            pretrained_model_name_or_path.replace("/", "--").replace(".", "/"),
+            pretrained_model_name_or_path.replace("/", "--"),
+        )
+        module = importlib.machinery.SourceFileLoader(module_path, corrected_path).load_module()

     if class_name is None:
         return find_pipeline_class(module)
@@ -454,4 +472,4 @@ def get_class_from_dynamic_module(
         revision=revision,
         local_files_only=local_files_only,
     )
-    return get_class_in_module(class_name, final_module.replace(".py", ""))
+    return get_class_in_module(class_name, final_module.replace(".py", ""), pretrained_model_name_or_path)
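A worked example (with a hypothetical repo id and an assumed cache root) of the path correction in the except ModuleNotFoundError branch above: a repo id containing "." produces a module path whose dot is misread as a package separator, and the fix rebuilds the real file location before loading it with SourceFileLoader.

import os

HF_MODULES_CACHE = os.path.expanduser("~/.cache/huggingface/modules")  # assumed cache root
repo_id = "some-org/my-pipeline-v2.1"                                  # hypothetical repo id
module_path = "diffusers_modules.local.some-org--my-pipeline-v2.1.pipeline"

# Naive reconstruction turns every "." into a path separator, splitting "v2.1":
broken = os.path.join(HF_MODULES_CACHE, module_path.replace(".", "/")) + ".py"
# .../diffusers_modules/local/some-org--my-pipeline-v2/1/pipeline.py

# Undo the split for the repo-id portion only:
fixed = broken.replace(
    repo_id.replace("/", "--").replace(".", "/"),  # "some-org--my-pipeline-v2/1"
    repo_id.replace("/", "--"),                    # "some-org--my-pipeline-v2.1"
)
# .../diffusers_modules/local/some-org--my-pipeline-v2.1/pipeline.py
print(fixed)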

src/diffusers/utils/testing_utils.py

Lines changed: 1 addition & 3 deletions
@@ -312,9 +312,7 @@ def require_torch_gpu(test_case):

 def require_torch_cuda_compatibility(expected_compute_capability):
     def decorator(test_case):
-        if not torch.cuda.is_available():
-            return unittest.skip(test_case)
-        else:
+        if torch.cuda.is_available():
             current_compute_capability = get_torch_cuda_device_capability()
             return unittest.skipUnless(
                 float(current_compute_capability) == float(expected_compute_capability),
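For comparison, a self-contained sketch of a compute-capability gate in the same spirit as require_torch_cuda_compatibility; the name and the skip-when-no-CUDA behavior are illustrative, not the upstream helper.

import unittest
import torch

# Self-contained sketch of a capability-gating test decorator; unlike the
# change above, it skips outright when CUDA is unavailable.
def require_exact_compute_capability(expected: float):
    def decorator(test_case):
        if not torch.cuda.is_available():
            return unittest.skip("requires a CUDA device")(test_case)
        major, minor = torch.cuda.get_device_capability()
        current = float(f"{major}.{minor}")
        return unittest.skipUnless(
            current == expected, f"requires compute capability {expected}"
        )(test_case)
    return decorator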

tests/models/autoencoders/test_models_consistency_decoder_vae.py

Lines changed: 3 additions & 2 deletions
@@ -21,6 +21,7 @@

 from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     load_image,
     slow,
@@ -162,13 +163,13 @@ def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     @torch.no_grad()
     def test_encode_decode(self):
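Here and in the test files below, torch.cuda.empty_cache() is replaced with the device-agnostic backend_empty_cache(torch_device) so the same tests run on non-CUDA accelerators. A minimal sketch of what such a helper can look like (the dispatch below is an assumption, not the diffusers implementation):

import torch

# Minimal sketch of a device-agnostic cache-clearing helper; assumed dispatch.
def empty_cache_for(device: str) -> None:
    if device.startswith("cuda"):
        torch.cuda.empty_cache()
    elif device.startswith("xpu"):
        torch.xpu.empty_cache()
    elif device.startswith("mps"):
        torch.mps.empty_cache()
    # plain CPU runs have no allocator cache to clear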

tests/models/unets/test_models_unet_2d.py

Lines changed: 2 additions & 1 deletion
@@ -22,6 +22,7 @@
 from diffusers import UNet2DModel
 from diffusers.utils import logging
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     require_torch_accelerator,
@@ -229,7 +230,7 @@ def test_from_pretrained_accelerate_wont_change_results(self):

         # two models don't need to stay in the device at the same time
         del model_accelerate
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
         gc.collect()

         model_normal_load, _ = UNet2DModel.from_pretrained(

tests/models/unets/test_models_unet_2d_condition.py

Lines changed: 2 additions & 3 deletions
@@ -46,7 +46,6 @@
     require_peft_backend,
     require_torch_accelerator,
     require_torch_accelerator_with_fp16,
-    require_torch_gpu,
     skip_mps,
     slow,
     torch_all_close,
@@ -978,13 +977,13 @@ def test_ip_adapter_plus(self):
         assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4)
         assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4)

-    @require_torch_gpu
     @parameterized.expand(
         [
             ("hf-internal-testing/unet2d-sharded-dummy", None),
             ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"),
         ]
     )
+    @require_torch_accelerator
     def test_load_sharded_checkpoint_from_hub(self, repo_id, variant):
         _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         loaded_model = self.model_class.from_pretrained(repo_id, variant=variant)
@@ -994,13 +993,13 @@ def test_load_sharded_checkpoint_from_hub(self, repo_id, variant):
         assert loaded_model
         assert new_output.sample.shape == (4, 4, 16, 16)

-    @require_torch_gpu
     @parameterized.expand(
         [
             ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None),
             ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"),
         ]
     )
+    @require_torch_accelerator
     def test_load_sharded_checkpoint_from_hub_subfolder(self, repo_id, variant):
         _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         loaded_model = self.model_class.from_pretrained(repo_id, subfolder="unet", variant=variant)

tests/pipelines/allegro/test_allegro.py

Lines changed: 3 additions & 2 deletions
@@ -24,6 +24,7 @@

 from diffusers import AllegroPipeline, AllegroTransformer3DModel, AutoencoderKLAllegro, DDIMScheduler
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     numpy_cosine_similarity_distance,
     require_hf_hub_version_greater,
@@ -341,12 +342,12 @@ class AllegroPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_allegro(self):
         generator = torch.Generator("cpu").manual_seed(0)
