
Commit 212ef6d

Merge branch 'main' into fix_requests.get
2 parents 44b5c60 + 75d7e5c commit 212ef6d

File tree

7 files changed: +374 -19 lines changed


docs/source/en/api/pipelines/wan.md

Lines changed: 354 additions & 11 deletions
Large diffs are not rendered by default.

src/diffusers/loaders/peft.py

Lines changed: 3 additions & 0 deletions
@@ -307,6 +307,9 @@ def load_lora_adapter(self, pretrained_model_name_or_path_or_dict, prefix="trans
         try:
             inject_adapter_in_model(lora_config, self, adapter_name=adapter_name, **peft_kwargs)
             incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name, **peft_kwargs)
+            # Set peft config loaded flag to True if module has been successfully injected and incompatible keys retrieved
+            if not self._hf_peft_config_loaded:
+                self._hf_peft_config_loaded = True
         except Exception as e:
             # In case `inject_adapter_in_model()` was unsuccessful even before injecting the `peft_config`.
             if hasattr(self, "peft_config"):
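
The flag matters because later adapter operations consult it before touching `peft_config`, so it must only flip after injection succeeds. A minimal sketch of the intended behavior; `_hf_peft_config_loaded` and `peft_config` come from the diff above, everything else here is a hypothetical stand-in:

    # Sketch: flip the loaded flag only after a successful injection.
    class DummyModule:
        def __init__(self):
            self._hf_peft_config_loaded = False

        def load_lora_adapter(self, inject):
            try:
                inject(self)  # may raise before any PEFT state is attached
                if not self._hf_peft_config_loaded:
                    self._hf_peft_config_loaded = True
            except Exception:
                # On failure the flag stays False, so callers never assume
                # a half-initialized peft_config exists.
                if hasattr(self, "peft_config"):
                    del self.peft_config
                raise

    m = DummyModule()
    m.load_lora_adapter(lambda mod: setattr(mod, "peft_config", {"default": object()}))
    assert m._hf_peft_config_loaded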

src/diffusers/loaders/single_file_model.py

Lines changed: 1 addition & 0 deletions
@@ -282,6 +282,7 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
         if quantization_config is not None:
             hf_quantizer = DiffusersAutoQuantizer.from_config(quantization_config)
             hf_quantizer.validate_environment()
+            torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype)
 
         else:
             hf_quantizer = None
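
For context, `update_torch_dtype` is the quantizer hook that lets a backend override or fill in the dtype before weights load; the added line makes the single-file path honor it like the regular loading path. A rough sketch of the call pattern, assuming a simplified quantizer (only the two method names come from the diff, the class body is illustrative):

    import torch

    # Hypothetical minimal quantizer showing what update_torch_dtype is for:
    # backends can coerce an unset or unsupported dtype to one they compute in.
    class SketchQuantizer:
        def validate_environment(self):
            pass  # real quantizers check that their backend is importable

        def update_torch_dtype(self, torch_dtype):
            # If the caller did not pick a dtype, fall back to a backend default.
            return torch_dtype if torch_dtype is not None else torch.float16

    torch_dtype = None
    hf_quantizer = SketchQuantizer()
    hf_quantizer.validate_environment()
    torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype)  # the added call
    assert torch_dtype == torch.float16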

src/diffusers/models/transformers/latte_transformer_3d.py

Lines changed: 1 addition & 1 deletion
@@ -273,7 +273,7 @@ def forward(
             hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1])
 
             if i == 0 and num_frame > 1:
-                hidden_states = hidden_states + self.temp_pos_embed
+                hidden_states = hidden_states + self.temp_pos_embed.to(hidden_states.dtype)
 
             if torch.is_grad_enabled() and self.gradient_checkpointing:
                 hidden_states = self._gradient_checkpointing_func(
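
The one-line fix guards against a dtype mismatch: `temp_pos_embed` can sit in float32 while `hidden_states` runs in half precision, and PyTorch's type promotion would silently upcast the sum. A standalone repro of the failure mode, with shapes invented for illustration:

    import torch

    hidden_states = torch.randn(2, 16, 8, dtype=torch.float16)
    temp_pos_embed = torch.randn(1, 16, 8, dtype=torch.float32)  # buffer left in fp32

    # Before the fix: the result silently promotes to float32.
    out = hidden_states + temp_pos_embed
    assert out.dtype == torch.float32

    # After the fix: cast the embedding to match, keeping the model in fp16.
    out = hidden_states + temp_pos_embed.to(hidden_states.dtype)
    assert out.dtype == torch.float16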

src/diffusers/pipelines/wan/pipeline_wan_i2v.py

Lines changed: 8 additions & 3 deletions
@@ -220,8 +220,13 @@ def _get_t5_prompt_embeds(
 
         return prompt_embeds
 
-    def encode_image(self, image: PipelineImageInput):
-        image = self.image_processor(images=image, return_tensors="pt").to(self.device)
+    def encode_image(
+        self,
+        image: PipelineImageInput,
+        device: Optional[torch.device] = None,
+    ):
+        device = device or self._execution_device
+        image = self.image_processor(images=image, return_tensors="pt").to(device)
         image_embeds = self.image_encoder(**image, output_hidden_states=True)
         return image_embeds.hidden_states[-2]
 
@@ -587,7 +592,7 @@ def __call__(
         if negative_prompt_embeds is not None:
             negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
 
-        image_embeds = self.encode_image(image)
+        image_embeds = self.encode_image(image, device)
         image_embeds = image_embeds.repeat(batch_size, 1, 1)
         image_embeds = image_embeds.to(transformer_dtype)
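
The change follows the usual diffusers pattern of resolving a target device from `_execution_device` rather than `self.device`, which can point at the wrong place when components are offloaded. A hedged sketch of the pattern; the class and tensor handling here are illustrative, not the pipeline's real internals:

    from typing import Optional
    import torch

    class SketchPipeline:
        @property
        def _execution_device(self):
            # Real pipelines resolve this from offloading hooks; sketch uses CPU.
            return torch.device("cpu")

        def encode_image(self, image: torch.Tensor, device: Optional[torch.device] = None):
            # Fall back to the execution device instead of self.device.
            device = device or self._execution_device
            return image.to(device)

    pipe = SketchPipeline()
    feats = pipe.encode_image(torch.randn(1, 3, 64, 64))
    assert feats.device.type == "cpu"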

tests/quantization/bnb/test_mixed_int8.py

Lines changed: 6 additions & 3 deletions
@@ -90,13 +90,16 @@ class Base8bitTests(unittest.TestCase):
 
     def get_dummy_inputs(self):
         prompt_embeds = load_pt(
-            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt"
+            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
+            map_location="cpu",
         )
         pooled_prompt_embeds = load_pt(
-            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/pooled_prompt_embeds.pt"
+            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/pooled_prompt_embeds.pt",
+            map_location="cpu",
         )
         latent_model_input = load_pt(
-            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/latent_model_input.pt"
+            "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/latent_model_input.pt",
+            map_location="cpu",
         )
 
         input_dict_for_transformer = {
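
Passing `map_location="cpu"` keeps the test artifacts off the accelerator at load time; tensors pickled on a CUDA device otherwise try to deserialize back onto CUDA. `load_pt` is a diffusers test helper, but the underlying behavior is plain `torch.load`:

    import torch

    # Save a tensor, then reload it pinned to CPU regardless of where it was
    # created. (The artifacts above were saved from a CUDA run, hence the flag.)
    torch.save(torch.randn(4, 4), "embeds.pt")
    embeds = torch.load("embeds.pt", map_location="cpu")
    assert embeds.device.type == "cpu"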

tests/quantization/gguf/test_gguf.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def test_gguf_linear_layers(self):
             if isinstance(module, torch.nn.Linear) and hasattr(module.weight, "quant_type"):
                 assert module.weight.dtype == torch.uint8
                 if module.bias is not None:
-                    assert module.bias.dtype == torch.float32
+                    assert module.bias.dtype == self.torch_dtype
 
     def test_gguf_memory_usage(self):
         quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype)
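
The assertion now tracks the configured compute dtype instead of hard-coding float32: GGUF keeps quantized weights as raw uint8 bytes while biases stay in the compute dtype. A small sketch of the invariant being tested, with a toy linear layer standing in for the quantized model (the layer setup is invented for illustration):

    import torch

    compute_dtype = torch.bfloat16  # mirrors GGUFQuantizationConfig(compute_dtype=...)

    # Toy stand-in: weight stored as raw quantized bytes, bias in compute dtype.
    linear = torch.nn.Linear(8, 8, bias=True)
    linear.weight = torch.nn.Parameter(
        torch.zeros(8, 8, dtype=torch.uint8), requires_grad=False
    )
    linear.bias.data = linear.bias.data.to(compute_dtype)

    assert linear.weight.dtype == torch.uint8
    assert linear.bias.dtype == compute_dtype  # what the updated test checks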
