We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 897aed7 · commit dae34b1 · Copy full SHA for dae34b1
src/diffusers/loaders/lora_conversion_utils.py
@@ -2538,8 +2538,12 @@ def normalize_out_key(k: str) -> str:
2538
2539
def get_alpha_scales(down_weight, alpha_key):
2540
rank = down_weight.shape[0]
2541
- alpha = state_dict.pop(alpha_key).item()
2542
- scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here
+ alpha_tensor = state_dict.pop(alpha_key, None)
+ if alpha_tensor is None:
2543
+ return 1.0, 1.0
2544
+ scale = (
2545
+ alpha_tensor.item() / rank
2546
+ ) # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here
2547
scale_down = scale
2548
scale_up = 1.0
2549
while scale_down * 2 < scale_up:
0 commit comments