4 changes: 2 additions & 2 deletions src/diffusers/loaders/lora_pipeline.py
@@ -2485,14 +2485,14 @@ def _maybe_expand_transformer_param_shape_or_error_(
 # TODO: consider if this layer needs to be a quantized layer as well if `is_quantized` is True.
 with torch.device("meta"):
     expanded_module = torch.nn.Linear(
-        in_features, out_features, bias=bias, dtype=module_weight.dtype
+        in_features, out_features, bias=bias, dtype=transformer.dtype
     )
 # Only weights are expanded, not biases, because only the input dimension changes while the
 # output dimension stays the same: the weight tensor has shape (out_features, in_features),
 # whereas the bias tensor has shape (out_features,).
 new_weight = torch.zeros_like(
-    expanded_module.weight.data, device=module_weight.device, dtype=module_weight.dtype
+    expanded_module.weight.data, device=module_weight.device, dtype=transformer.dtype
 )
 slices = tuple(slice(0, dim) for dim in module_weight_shape)
 new_weight[slices] = module_weight
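
For context, the hunk above zero-pads only the weight of a Linear layer whose input dimension grows, while the bias is left alone. A minimal, standalone sketch of that idea, using hypothetical sizes (in_old, in_new, out_features) rather than names from diffusers, might look like this:

import torch

# Hypothetical sizes for illustration only.
in_old, in_new, out_features = 64, 128, 32
module = torch.nn.Linear(in_old, out_features, bias=True)

# Allocate the larger layer on the meta device so no real memory is spent,
# then build the padded weight explicitly on the original device/dtype.
with torch.device("meta"):
    expanded = torch.nn.Linear(in_new, out_features, bias=True, dtype=module.weight.dtype)

new_weight = torch.zeros_like(
    expanded.weight.data, device=module.weight.device, dtype=module.weight.dtype
)
# Copy the old weight into the leading block; columns in_old..in_new stay zero,
# so the extra input features contribute nothing until they are trained.
slices = tuple(slice(0, dim) for dim in module.weight.shape)
new_weight[slices] = module.weight.data

# The bias has shape (out_features,), which does not change, so it is reused as-is.
new_bias = module.bias.data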