@@ -4727,7 +4727,6 @@ def lora_state_dict(
                     - A [torch state
                       dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
 
-            load_with_metadata: TODO
             cache_dir (`Union[str, os.PathLike]`, *optional*):
                 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                 is not used.
@@ -4762,7 +4761,6 @@ def lora_state_dict(
         subfolder = kwargs.pop("subfolder", None)
         weight_name = kwargs.pop("weight_name", None)
         use_safetensors = kwargs.pop("use_safetensors", None)
-        load_with_metadata = kwargs.pop("load_with_metadata", False)
 
         allow_pickle = False
         if use_safetensors is None:
@@ -4787,7 +4785,6 @@ def lora_state_dict(
             subfolder=subfolder,
             user_agent=user_agent,
             allow_pickle=allow_pickle,
-            load_with_metadata=load_with_metadata,
         )
         if any(k.startswith("diffusion_model.") for k in state_dict):
             state_dict = _convert_non_diffusers_wan_lora_to_diffusers(state_dict)
@@ -4861,7 +4858,6 @@ def load_lora_weights(
             raise ValueError("PEFT backend is required for this method.")
 
         low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA)
-        load_with_metadata = kwargs.get("load_with_metadata", False)
         if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
             raise ValueError(
                 "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
@@ -4888,7 +4884,6 @@ def load_lora_weights(
             adapter_name=adapter_name,
             _pipeline=self,
             low_cpu_mem_usage=low_cpu_mem_usage,
-            load_with_metadata=load_with_metadata,
             hotswap=hotswap,
         )
 
@@ -4902,54 +4897,25 @@ def load_lora_into_transformer(
         _pipeline=None,
         low_cpu_mem_usage=False,
         hotswap: bool = False,
-        load_with_metadata: bool = False,
     ):
         """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.
-
-        Parameters:
-            state_dict (`dict`):
-                A standard state dict containing the lora layer parameters. The keys can either be indexed
-                directly into the unet or prefixed with an additional `unet` which can be used to distinguish
-                between text encoder lora layers.
-            transformer (`WanTransformer3DModel`):
-                The Transformer model to load the LoRA layers into.
-            adapter_name (`str`, *optional*):
-                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
-                `default_{i}` where i is the total number of adapters being loaded.
-            low_cpu_mem_usage (`bool`, *optional*):
-                Speed up model loading by only loading the pretrained LoRA weights and not initializing the
-                random weights.
-<<<<<<< HEAD
-            hotswap : (`bool`, *optional*)
-                Defaults to `False`. Whether to substitute an existing (LoRA) adapter with the newly loaded
-                adapter in-place. This means that, instead of loading an additional adapter, this will take the
-                existing adapter weights and replace them with the weights of the new adapter. This can be
-                faster and more memory efficient. However, the main advantage of hotswapping is that when the
-                model is compiled with torch.compile, loading the new adapter does not require recompilation of
-                the model. When using hotswapping, the passed `adapter_name` should be the name of an already
-                loaded adapter.
-
-                If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling),
-                you need to call an additional method before loading the adapter:
-
-                ```py
-                pipeline = ...  # load diffusers pipeline
-                max_rank = ...  # the highest rank among all LoRAs that you want to load
-                # call *before* compiling and loading the LoRA adapter
-                pipeline.enable_lora_hotswap(target_rank=max_rank)
-                pipeline.load_lora_weights(file_name)
-                # optionally compile the model now
-                ```
-
-                Note that hotswapping adapters of the text encoder is not yet supported. There are some further
-                limitations to this technique, which are documented here:
-                https://huggingface.co/docs/peft/main/en/package_reference/hotswap
-            load_with_metadata: TODO
-=======
-            hotswap (`bool`, *optional*):
-                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
->>>>>>> main
+        This will load the LoRA layers specified in `state_dict` into `transformer`.
+
+        Parameters:
+            state_dict (`dict`):
+                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
+                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
+                encoder lora layers.
+            transformer (`WanTransformer3DModel`):
+                The Transformer model to load the LoRA layers into.
+            adapter_name (`str`, *optional*):
+                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
+                `default_{i}` where i is the total number of adapters being loaded.
+            low_cpu_mem_usage (`bool`, *optional*):
+                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
+                weights.
+            hotswap (`bool`, *optional*):
+                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
         """
         if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
             raise ValueError(
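The replacement docstring now only cross-references [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] instead of repeating the hotswap explanation. For reference, the workflow the removed example described looks roughly like the minimal sketch below; `file_name`, `other_file_name`, and `max_rank` are placeholders for your own LoRA checkpoints and the largest rank among them.

```py
# Sketch of the hotswap flow described in the removed docstring.
# `file_name`, `other_file_name`, and `max_rank` are placeholders.
pipeline = ...  # load a diffusers pipeline that uses this LoRA loader mixin
max_rank = ...  # the highest rank among all LoRAs you plan to load

# Call *before* compiling and before loading the first LoRA adapter.
pipeline.enable_lora_hotswap(target_rank=max_rank)
pipeline.load_lora_weights(file_name, adapter_name="default_0")

# Optionally compile the model now; hotswapping avoids recompilation afterwards.

# Swap a different LoRA in place by reusing the already-loaded adapter name.
pipeline.load_lora_weights(other_file_name, adapter_name="default_0", hotswap=True)
```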
@@ -4965,7 +4931,6 @@ def load_lora_into_transformer(
             _pipeline=_pipeline,
             low_cpu_mem_usage=low_cpu_mem_usage,
             hotswap=hotswap,
-            load_with_metadata=load_with_metadata,
         )
 
     @classmethod
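Taken together, these hunks mean the public entry points no longer accept a `load_with_metadata` argument. A rough usage sketch under that assumption follows; the checkpoint id, LoRA repo, weight file name, and adapter name are hypothetical placeholders, `WanPipeline` is assumed to be the pipeline consuming this mixin, and the last two lines assume the Wan variant of `lora_state_dict` returns a single state dict, as the hunks above suggest.

```py
import torch

from diffusers import WanPipeline

# Hypothetical model id and LoRA repo/file; substitute your own.
pipe = WanPipeline.from_pretrained("<wan-checkpoint-id>", torch_dtype=torch.bfloat16)

# Goes through `lora_state_dict` (fetch + optional non-diffusers key conversion)
# and then `load_lora_into_transformer`; no `load_with_metadata` kwarg anymore.
pipe.load_lora_weights(
    "<repo-or-path-with-lora>",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="my_wan_lora",
)

# The (possibly converted) state dict can also be inspected via the classmethod above.
state_dict = WanPipeline.lora_state_dict("<repo-or-path-with-lora>")
print(list(state_dict)[:5])
```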