1 parent ec068f9 commit 6cf0be5
src/diffusers/models/transformers/transformer_sd3.py
@@ -282,9 +282,10 @@ def forward(
             # weight the lora layers by setting `lora_scale` for each PEFT layer
             scale_lora_layers(self, lora_scale)
         else:
-            logger.warning(
-                "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
-            )
+            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
+                logger.warning(
+                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+                )
 
         height, width = hidden_states.shape[-2:]
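
The guard added here keeps the non-PEFT branch silent unless the caller actually passed a `scale` through `joint_attention_kwargs`, instead of warning unconditionally. Below is a minimal standalone sketch of that guard; `warn_if_scale_ignored` and the plain `logging` logger are hypothetical stand-ins for illustration, not part of the diffusers API.

import logging

logger = logging.getLogger(__name__)


def warn_if_scale_ignored(joint_attention_kwargs, use_peft_backend):
    # Sketch of the guarded warning from this commit: only warn when a `scale`
    # was explicitly supplied and the PEFT backend is not in use.
    if use_peft_backend:
        # `scale` is honoured by scaling the PEFT LoRA layers, nothing to warn about.
        return
    if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
        logger.warning(
            "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
        )


# No warning: `scale` was not supplied.
warn_if_scale_ignored({"other_kwarg": 1}, use_peft_backend=False)
# Warning: `scale` was supplied but has no effect without the PEFT backend.
warn_if_scale_ignored({"scale": 0.5}, use_peft_backend=False)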