54 | 54 | "SanaTransformer2DModel": lambda model_cls, weights: weights, |
55 | 55 | } |
56 | 56 | _NO_CONFIG_UPDATE_KEYS = ["to_k", "to_q", "to_v"] |
| 57 | +_FULL_NAME_PREFIX_FOR_PEFT = "FULL-NAME" |
57 | 58 |
58 | 59 |
59 | 60 | def _maybe_adjust_config(config): |
@@ -188,6 +189,7 @@ def load_lora_adapter(self, pretrained_model_name_or_path_or_dict, prefix="trans |
188 | 189 | """ |
189 | 190 | from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict |
190 | 191 | from peft.tuners.tuners_utils import BaseTunerLayer |
| 192 | + from peft.utils.constants import FULLY_QUALIFIED_PATTERN_KEY_PREFIX |
191 | 193 |
192 | 194 | cache_dir = kwargs.pop("cache_dir", None) |
193 | 195 | force_download = kwargs.pop("force_download", False) |
@@ -253,14 +255,14 @@ def load_lora_adapter(self, pretrained_model_name_or_path_or_dict, prefix="trans |
253 | 255 | # Cannot figure out rank from lora layers that don't have at least 2 dimensions.
254 | 256 | # Bias layers in LoRA only have a single dimension |
255 | 257 | if "lora_B" in key and val.ndim > 1: |
256 | | - rank[key] = val.shape[1] |
| 258 | + rank[f"{FULLY_QUALIFIED_PATTERN_KEY_PREFIX}{key}"] = val.shape[1] |
257 | 259 |
258 | 260 | if network_alphas is not None and len(network_alphas) >= 1: |
259 | 261 | alpha_keys = [k for k in network_alphas.keys() if k.startswith(f"{prefix}.")] |
260 | 262 | network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys} |
261 | 263 |
262 | 264 | lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict) |
263 | | - lora_config_kwargs = _maybe_adjust_config(lora_config_kwargs) |
| 265 | + # lora_config_kwargs = _maybe_adjust_config(lora_config_kwargs) |
264 | 266 |
265 | 267 | if "use_dora" in lora_config_kwargs: |
266 | 268 | if lora_config_kwargs["use_dora"]: |
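
For context, a minimal sketch of what the last hunk changes, assuming standard PEFT LoRA shapes (`lora_A`: `rank x in_features`, `lora_B`: `out_features x rank`): the rank read off each `lora_B` matrix is now keyed by the state-dict name prefixed with PEFT's fully-qualified marker, so the resulting `rank_pattern` entry targets that exact module instead of any module whose name merely matches the pattern. The state-dict keys below are hypothetical, and the fallback prefix value is a placeholder rather than the real constant.

```python
import torch

try:
    # The constant this diff starts importing from peft.
    from peft.utils.constants import FULLY_QUALIFIED_PATTERN_KEY_PREFIX
except ImportError:
    # Placeholder for peft versions without the constant; the actual value may differ.
    FULLY_QUALIFIED_PATTERN_KEY_PREFIX = "FULL-NAME-"

# Hypothetical LoRA entries for a single 64 -> 64 projection with rank 4.
state_dict = {
    "blocks.0.attn.to_q.lora_A.weight": torch.zeros(4, 64),
    "blocks.0.attn.to_q.lora_B.weight": torch.zeros(64, 4),
    # Bias entries are 1-D, so the ndim check below skips them.
    "blocks.0.attn.to_q.lora_B.bias": torch.zeros(64),
}

rank = {}
for key, val in state_dict.items():
    # Only lora_B matrices (out_features x rank) reveal the rank.
    if "lora_B" in key and val.ndim > 1:
        rank[f"{FULLY_QUALIFIED_PATTERN_KEY_PREFIX}{key}"] = val.shape[1]

# rank now maps the prefixed, fully-qualified lora_B key to 4.
print(rank)
```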