@@ -76,7 +76,6 @@ class BaseConfig:
            If left empty, the model will either be initialized with random weights
            if it is a Llama model or the weights will be downloaded from HuggingFace
            if it is a non-Llama model.
-        checkpoint_dir: Path to directory containing sharded checkpoint files.
        adapter_checkpoint: Path to the adapter.pt file from torchtune. Used if
            the model has trained LoRA adapters. Must provide
            adapter_config.json.
@@ -87,10 +86,6 @@ class BaseConfig:
            e.g. '"{\"get_bos_id\":128000, \"get_eos_ids\":[128009, 128001]}"'
        use_lora: Only for use with QAT. Rank of the LoRA adapter, disabled
            if set to 0.
-        fairseq2: For legacy internal use cases, this is safe to ignore.
-        preq_mode: Legacy option to specify how prequantized weights are loaded.
-            Going forward, ExecuTorch supports loading weights prequantized through
-            TorchAo as-is, without any special handling.
        preq_group_size: Legacy option to specify the group size of prequantized weights.
        preq_embedding_quantize: Legacy option to specify how prequantized embeddings
            are loaded.
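The `metadata` docstring above shows the shell-escaped form of the override; underneath the escaping it is just a JSON string. As a minimal sketch, the same string can be built programmatically instead of escaped by hand (keys and values copied from the docstring example; nothing here is part of the exporter's API):

```python
import json

# Build the metadata override instead of hand-escaping it.
# Keys/values are the ones shown in the docstring example above.
metadata = json.dumps({"get_bos_id": 128000, "get_eos_ids": [128009, 128001]})
# -> '{"get_bos_id": 128000, "get_eos_ids": [128009, 128001]}'
```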
@@ -99,13 +94,11 @@ class BaseConfig:
    model_class: ModelType = ModelType.llama3
    params: Optional[str] = None
    checkpoint: Optional[str] = None
-    checkpoint_dir: Optional[str] = None
    adapter_checkpoint: Optional[str] = None
    adapter_config: Optional[str] = None
    tokenizer_path: Optional[str] = None
    metadata: Optional[str] = None
    use_lora: int = 0
-    fairseq2: bool = False
    preq_mode: Optional[PreqMode] = None
    preq_group_size: int = 32
    preq_embedding_quantize: str = "8,0"
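After this hunk, a single `checkpoint` path is the only way to point `BaseConfig` at local weights; the sharded `checkpoint_dir` and the `fairseq2` toggle are gone. Since `BaseConfig` is a dataclass with defaults, the trimmed shape can be populated directly. The sketch below is illustrative only: field names come from the definitions above, while the paths and values are placeholders.

```python
# Illustrative sketch only: populate the trimmed dataclass directly.
base = BaseConfig(
    model_class=ModelType.llama3,
    params="/path/to/params.json",
    checkpoint="/path/to/consolidated.00.pth",  # single file; checkpoint_dir no longer exists
    tokenizer_path="/path/to/tokenizer.model",
    metadata='{"get_bos_id":128000, "get_eos_ids":[128009, 128001]}',
    use_lora=0,  # QAT-only knob; 0 leaves LoRA disabled
)
```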
@@ -527,8 +520,6 @@ def from_args(cls, args: argparse.Namespace) -> "LlmConfig": # noqa: C901
            llm_config.base.params = args.params
        if hasattr(args, "checkpoint"):
            llm_config.base.checkpoint = args.checkpoint
-        if hasattr(args, "checkpoint_dir"):
-            llm_config.base.checkpoint_dir = args.checkpoint_dir
        if hasattr(args, "adapter_checkpoint"):
            llm_config.base.adapter_checkpoint = args.adapter_checkpoint
        if hasattr(args, "adapter_config"):
@@ -539,8 +530,6 @@ def from_args(cls, args: argparse.Namespace) -> "LlmConfig": # noqa: C901
            llm_config.base.metadata = args.metadata
        if hasattr(args, "use_lora"):
            llm_config.base.use_lora = args.use_lora
-        if hasattr(args, "fairseq2"):
-            llm_config.base.fairseq2 = args.fairseq2

        # PreqMode settings
        if hasattr(args, "preq_mode") and args.preq_mode:
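Both `from_args` hunks keep the same guarded-copy pattern: an argparse attribute is copied onto the config only when it exists on the namespace, so scripts whose namespace never carried `checkpoint_dir` or `fairseq2` behave exactly as before, and the two dropped fields are simply never set. A compact sketch of that pattern, assuming a plain attribute-for-attribute mapping (the helper name is hypothetical, not part of the file):

```python
import argparse

def _copy_if_present(ns: argparse.Namespace, section: object, names: list[str]) -> None:
    """Copy each argparse attribute onto the config section only if it was defined."""
    for name in names:
        if hasattr(ns, name):
            setattr(section, name, getattr(ns, name))

# Roughly equivalent to the surviving hasattr blocks above:
# _copy_if_present(args, llm_config.base,
#                  ["params", "checkpoint", "adapter_checkpoint", "adapter_config",
#                   "tokenizer_path", "metadata", "use_lora"])
```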