@@ -215,9 +215,10 @@ class ExportConfig:
         so_library: Shared library to specify custom quantized operators.
         export_only: Whether to stop right after torch.export() and
             just save the exported .pt2 graph file.
-        foundation_weights_file: configure the foundation weights of a model
-            to be placed in a separate file, external to the PTE. Pass the
-            intended file name here.
+        foundation_weights_file: place the foundation weights of the model into
+            a separate file, external to the PTE. Pass the file name here.
+        lora_weights_file: place the lora weights of the model into a
+            separate file, external to the PTE. Pass the file name here.
     """
 
     max_seq_length: int = 128
@@ -227,6 +228,7 @@ class ExportConfig:
     so_library: Optional[str] = None
     export_only: bool = False
     foundation_weights_file: Optional[str] = None
+    lora_weights_file: Optional[str] = None
 
     def __post_init__(self):
         if self.max_context_length < self.max_seq_length:
@@ -572,6 +574,8 @@ def from_args(cls, args: argparse.Namespace) -> "LlmConfig":  # noqa: C901
         llm_config.export.export_only = args.export_only
         if hasattr(args, "foundation_weights_file"):
            llm_config.export.foundation_weights_file = args.foundation_weights_file
+        if hasattr(args, "lora_weights_file"):
+            llm_config.export.lora_weights_file = args.lora_weights_file
 
         # QuantizationConfig
         if hasattr(args, "quantization_mode"):