Closed
Labels
Module:Demo: Issues regarding demos under the demo/ directory: Diffusion, DeBERTa, Bert
Description
I ran TensorRT/demo/Diffusion/demo_img2img_flux.py with --build-enable-refit and --lora-path, and the LoRA merge step failed.
def merge_loras(model, lora_loader):
    paths, weights, scale = lora_loader.paths, lora_loader.weights, lora_loader.scale
    for i, path in enumerate(paths):
        print(f"[I] Loading LoRA: {path}, weight {weights[i]}")
        if isinstance(lora_loader, SDLoraLoader):
            state_dict, network_alphas = lora_loader.lora_state_dict(path, unet_config=model.config)
            lora_loader.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=model, adapter_name=path)
        elif isinstance(lora_loader, FLUXLoraLoader):
            state_dict, network_alphas = lora_loader.lora_state_dict(path, return_alphas=True)
            lora_loader.load_lora_into_transformer(state_dict, network_alphas=network_alphas, transformer=model, adapter_name=path)
        else:
            raise ValueError(f"Unsupported LoRA loader: {lora_loader}")
    model.set_adapters(paths, weights=weights)
    # NOTE: fuse_lora is an experimental API in Diffusers
    model.fuse_lora(adapter_names=paths, lora_scale=scale)
    model.unload_lora()
    return model
The function above fails at the call:

    lora_loader.load_lora_into_transformer(state_dict, network_alphas=network_alphas, transformer=model, adapter_name=path)

The path parameter is the LoRA file path, a .safetensors file:
path: /easycontrol_workspace/TensorRT/demo/Diffusion/lora/FLUX-dev-lora-One-Click-Creative-Template.safetensors
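For context, the failure is a PyTorch naming rule rather than anything LoRA-specific: PEFT registers each adapter under its adapter_name as a key in an nn.ModuleDict (see the update_layer frame in the trace below), and torch.nn.Module.add_module() rejects any module name containing ".". A minimal repro of the same KeyError:

    import torch.nn as nn

    # nn.ModuleDict keys become submodule names, and add_module() raises
    # KeyError for any name containing ".", so a file path such as a
    # .safetensors path is an invalid adapter_name.
    nn.ModuleDict({"lora/adapter.safetensors": nn.Identity()})
    # KeyError: module name can't contain ".", got: lora/adapter.safetensors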
Error info:

    Exception occurred: KeyError (note: full exception trace is shown but execution is paused at: _run_module_as_main)
    'module name can\'t contain ".", got: /easycontrol_workspace/TensorRT/demo/Diffusion/lora/FLUX-dev-lora-One-Click-Creative-Template.safetensors'
      File "/usr/local/lib/python3.11/site-packages/torch/nn/modules/module.py", line 643, in add_module
        raise KeyError(f'module name can\'t contain ".", got: {name}')
      File "/usr/local/lib/python3.11/site-packages/torch/nn/modules/container.py", line 495, in __setitem__
        self.add_module(key, module)
      File "/usr/local/lib/python3.11/site-packages/torch/nn/modules/container.py", line 560, in update
        self[key] = module
        ~~~~^^^^^
      File "/usr/local/lib/python3.11/site-packages/torch/nn/modules/container.py", line 488, in __init__
        self.update(modules)
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/layer.py", line 141, in update_layer
        self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/layer.py", line 543, in __init__
        adapter_name,
        r,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        init_lora_weights=init_lora_weights,
        use_rslora=use_rslora,
        use_dora=use_dora,
        lora_bias=lora_bias,
        )
        self.is_target_conv_1d_layer = is_target_conv_1d_layer
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/layer.py", line 1765, in dispatch_default
        new_module = Linear(target, adapter_name, **kwargs)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/model.py", line 342, in _create_new_module
        new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/model.py", line 237, in _create_and_replace
        new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/tuners_utils.py", line 508, in inject_adapter
        self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/tuners_utils.py", line 180, in __init__
        self.inject_adapter(self.model, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
      File "/usr/local/lib/python3.11/site-packages/peft/tuners/lora/model.py", line 142, in __init__
        super().__init__(model, config, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
      File "/usr/local/lib/python3.11/site-packages/peft/mapping.py", line 76, in inject_adapter_in_model
        peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/usr/local/lib/python3.11/site-packages/diffusers/loaders/peft.py", line 311, in load_lora_adapter
        inject_adapter_in_model(lora_config, self, adapter_name=adapter_name, **peft_kwargs)
      File "/usr/local/lib/python3.11/site-packages/diffusers/loaders/lora_pipeline.py", line 2262, in load_lora_into_transformer
        state_dict,
        network_alphas=network_alphas,
        adapter_name=adapter_name,
        metadata=metadata,
        _pipeline=_pipeline,
        low_cpu_mem_usage=low_cpu_mem_usage,
        hotswap=hotswap,
        )
      File "/easycontrol_workspace/TensorRT/demo/Diffusion/demo_diffusion/model/lora.py", line 50, in merge_loras
        lora_loader.load_lora_into_transformer(state_dict, network_alphas=network_alphas, transformer=model, adapter_name=path)
      File "/easycontrol_workspace/TensorRT/demo/Diffusion/demo_diffusion/pipeline/diffusion_pipeline.py", line 695, in _refit_engine
        model = merge_loras(obj.get_model(), self.lora_loader)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "/easycontrol_workspace/TensorRT/demo/Diffusion/demo_diffusion/pipeline/diffusion_pipeline.py", line 857, in load_engines
        self._refit_engine(obj, model_name, model_config)
      File "/easycontrol_workspace/TensorRT/demo/Diffusion/demo_img2img_flux.py", line 208, in <module>
        framework_model_dir=args.framework_model_dir,
        **kwargs_load_engine,
        )
      File "/usr/local/lib/python3.11/runpy.py", line 88, in _run_code
        exec(code, run_globals)
      File "/usr/local/lib/python3.11/runpy.py", line 198, in _run_module_as_main (Current frame)
        "__main__", mod_spec)
    KeyError: 'module name can\'t contain ".", got: /easycontrol_workspace/TensorRT/demo/Diffusion/lora/FLUX-dev-lora-One-Click-Creative-Template.safetensors'
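A sketch of a possible workaround (an assumption, not a confirmed fix; safe_adapter_name is a hypothetical helper, and only the FLUXLoraLoader branch is shown): register, activate, and fuse the adapters under a dot-free name derived from the file path rather than the raw path itself.

    import os

    def safe_adapter_name(path):
        # Hypothetical helper: strip the directory and the ".safetensors"
        # extension, then replace any leftover "." so the result is a
        # valid PyTorch module name.
        return os.path.splitext(os.path.basename(path))[0].replace(".", "_")

    def merge_loras(model, lora_loader):
        paths, weights, scale = lora_loader.paths, lora_loader.weights, lora_loader.scale
        names = [safe_adapter_name(p) for p in paths]
        for path, name in zip(paths, names):
            print(f"[I] Loading LoRA: {path} as adapter '{name}'")
            state_dict, network_alphas = lora_loader.lora_state_dict(path, return_alphas=True)
            lora_loader.load_lora_into_transformer(
                state_dict, network_alphas=network_alphas, transformer=model, adapter_name=name
            )
        # The sanitized names must also be used when activating and fusing.
        model.set_adapters(names, weights=weights)
        model.fuse_lora(adapter_names=names, lora_scale=scale)
        model.unload_lora()
        return model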
Environment
TensorRT Version:
NVIDIA GPU:
NVIDIA Driver Version:
CUDA Version:
CUDNN Version:
Operating System:
Python Version (if applicable):
Tensorflow Version (if applicable):
PyTorch Version (if applicable):
Baremetal or Container (if so, version):
Relevant Files
Model link:
Steps To Reproduce
Commands or scripts:
Have you tried the latest release?:
Can this model run on other frameworks? For example run ONNX model with ONNXRuntime (polygraphy run <model.onnx> --onnxrt):