Commit e1fae4e

Remove fairseq from export_llama
Differential Revision: D87831086
Pull Request resolved: #16052
1 parent aa4ab02 commit e1fae4e

File tree

4 files changed: +1 -55 lines changed


examples/models/llama/export_llama_lib.py

Lines changed: 0 additions & 1 deletion
@@ -396,7 +396,6 @@ def build_args_parser() -> argparse.ArgumentParser:
         " [16] pattern specifies all layers have sliding window of 16.",
     )
 
-    parser.add_argument("-2", "--fairseq2", action="store_true")
     parser.add_argument("-v", "--verbose", action="store_true")
     parser.add_argument(
         "-X",

examples/models/llama/model.py

Lines changed: 0 additions & 34 deletions
@@ -21,17 +21,6 @@
 from executorch.extension.llm.export.config.llm_config import LlmConfig
 from torchao.utils import TorchAOBaseTensor
 
-try:
-    from .fairseq2 import convert_to_llama_checkpoint
-
-except ImportError:
-
-    def convert_to_llama_checkpoint(**kwargs):
-        raise NotImplementedError(
-            "Please install fairseq2 with `pip install fairseq2`."
-        )
-
-
 from ..model_base import EagerModelBase
 
 
@@ -70,33 +59,10 @@ def __init__(self, llm_config: Optional[LlmConfig] = None):
         checkpoint = {}
         if checkpoint_path:
             checkpoint = torch.load(checkpoint_path, map_location=device, mmap=True)
-
-        # If given checkpoint is fairseq, convert to llama checkpoint.
-        fairseq2_checkpoint = self.llm_config.base.fairseq2
-        if fairseq2_checkpoint:
-            print("Using fairseq2 checkpoint")
-            checkpoint = convert_to_llama_checkpoint(checkpoint=checkpoint)
         if "model" in checkpoint:
             # NB: some checkpoint contains a "model" field, which is the actual weights dict
             checkpoint = checkpoint["model"]
 
-        # Check if user gave a fairseq2 checkpoint unknowingly without specifying --fairseq2.
-        if (not fairseq2_checkpoint) and checkpoint.get(
-            "final_proj.weight", None
-        ) is not None:
-            raise ValueError(
-                """
-************************************************************
-This looks like a Fairseq2 checkpoint (based on the presence
-of `final_proj.weight`.
-
-You can import Fairseq2 checkpoints using the --fairseq2
-option, but --fairseq2 was not specified. Please verify
-the checkpoint format to avoid generating faulty models.
-************************************************************
-"""
-            )
-
         # Get optional params.
         params = {}
         if params_path:
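
After this change, the checkpoint handling that remains in `__init__` is just a `torch.load` plus the optional unwrapping of a nested "model" key. A minimal sketch of that surviving path, pulled out into a hypothetical standalone helper (the real constructor also resolves paths, params, and device from `llm_config`):

import torch

def load_llama_checkpoint(checkpoint_path: str, device: str = "cpu") -> dict:
    # Hypothetical helper mirroring only the kept lines of the hunk above.
    checkpoint = {}
    if checkpoint_path:
        # mmap=True keeps the file on disk instead of reading it all into memory.
        checkpoint = torch.load(checkpoint_path, map_location=device, mmap=True)
    if "model" in checkpoint:
        # Some checkpoints nest the actual weights dict under a "model" field.
        checkpoint = checkpoint["model"]
    return checkpoint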

examples/models/llama/source_transformation/quantize.py

Lines changed: 1 addition & 16 deletions
@@ -17,21 +17,6 @@
 from executorch.extension.llm.export.builder import DType
 
 
-try:
-    from fairseq2.nn.embedding import (
-        Embedding as fsEmbedding,
-        StandardEmbedding as fsStandardEmbedding,
-    )
-
-    from fairseq2.nn.projection import Linear as fsLinear
-
-    print("Using fairseq2 modules.")
-except:
-    fsEmbedding = nn.Embedding
-    fsStandardEmbedding = nn.Embedding
-    fsLinear = nn.Linear
-
-
 def quantize(  # noqa C901
     model: torch.nn.Module,
     qmode: str,
@@ -400,7 +385,7 @@ def create_quantized_state_dict(self) -> Dict:
 
         for fqn, mod in self.mod.named_modules():
             # print(f"maybe? quantize {fqn}...{type(mod)}")
-            if isinstance(mod, torch.nn.Linear) or isinstance(mod, fsLinear):
+            if isinstance(mod, torch.nn.Linear):
                 # print(f"candidate {fqn}, nodetype {self.node_type}")
                 if (
                     (self.node_type == "*")
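
With the `fsLinear` alias gone, the quantizer's module scan keys only on `torch.nn.Linear`. A small self-contained sketch of that filtering pattern over `named_modules()`, with a hypothetical toy model and a guessed node_type match (the real condition in `create_quantized_state_dict` continues beyond what this diff shows):

import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
node_type = "*"  # assume "*" means: every Linear layer is a candidate

for fqn, mod in model.named_modules():
    # Only plain torch.nn.Linear modules are considered now; the old
    # fairseq2 fsLinear branch no longer exists.
    if isinstance(mod, nn.Linear):
        if node_type == "*" or fqn.endswith(node_type):
            print(f"candidate {fqn}: in={mod.in_features}, out={mod.out_features}")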

extension/llm/export/config/llm_config.py

Lines changed: 0 additions & 4 deletions
@@ -86,7 +86,6 @@ class BaseConfig:
            e.g. '"{\"get_bos_id\":128000, \"get_eos_ids\":[128009, 128001]}"'
        use_lora: Only for use with QAT. Rank of the LoRA adapter, disabled
            if set to 0.
-       fairseq2: For legacy internal use cases, this is safe to ignore.
        preq_mode: Legacy option to specify how prequantized weights are loaded.
            Going forward, ExecuTorch supports loading weights prequantized through
            TorchAo as-is, without any special handling.
@@ -103,7 +102,6 @@ class BaseConfig:
     tokenizer_path: Optional[str] = None
     metadata: Optional[str] = None
     use_lora: int = 0
-    fairseq2: bool = False
     preq_mode: Optional[PreqMode] = None
     preq_group_size: int = 32
     preq_embedding_quantize: str = "8,0"
@@ -535,8 +533,6 @@ def from_args(cls, args: argparse.Namespace) -> "LlmConfig":  # noqa: C901
             llm_config.base.metadata = args.metadata
         if hasattr(args, "use_lora"):
             llm_config.base.use_lora = args.use_lora
-        if hasattr(args, "fairseq2"):
-            llm_config.base.fairseq2 = args.fairseq2
 
         # PreqMode settings
         if hasattr(args, "preq_mode") and args.preq_mode:
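
The `from_args` hunk follows one pattern throughout: copy an argparse attribute onto the config only when the namespace actually defines it, so the removed flag simply has no counterpart anymore. A stripped-down sketch of that pattern with a hypothetical two-field config (the real BaseConfig defines many more fields):

import argparse
from dataclasses import dataclass
from typing import Optional

@dataclass
class MiniBaseConfig:
    # Hypothetical subset of BaseConfig; note there is no fairseq2 field.
    metadata: Optional[str] = None
    use_lora: int = 0

def from_args(args: argparse.Namespace) -> MiniBaseConfig:
    cfg = MiniBaseConfig()
    # hasattr guards let the same converter handle parsers that only
    # expose a subset of the flags.
    if hasattr(args, "metadata"):
        cfg.metadata = args.metadata
    if hasattr(args, "use_lora"):
        cfg.use_lora = args.use_lora
    return cfg

print(from_args(argparse.Namespace(use_lora=8)))
# MiniBaseConfig(metadata=None, use_lora=8)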
