From d60173b03975be61073d65c09c8c5e56fcec5e39 Mon Sep 17 00:00:00 2001
From: Inkling <64665980+Inklingdq@users.noreply.github.com>
Date: Wed, 19 Mar 2025 23:17:42 -0700
Subject: [PATCH 1/2] Add SmolLM (smollm2) (#9354)

---
 examples/models/llama/export_llama_lib.py  |  1 +
 examples/models/smollm2/135M_config.json   | 14 ++++
 examples/models/smollm2/__init__           | 14 ++++
 examples/models/smollm2/convert_weights.py | 80 ++++++++++++++++++++++
 4 files changed, 109 insertions(+)
 create mode 100644 examples/models/smollm2/135M_config.json
 create mode 100644 examples/models/smollm2/__init__
 create mode 100644 examples/models/smollm2/convert_weights.py

diff --git a/examples/models/llama/export_llama_lib.py b/examples/models/llama/export_llama_lib.py
index ff2b82f6c65..cb887252571 100644
--- a/examples/models/llama/export_llama_lib.py
+++ b/examples/models/llama/export_llama_lib.py
@@ -95,6 +95,7 @@
     "static_llama",
     "qwen2_5",
     "phi-4-mini",
+    "smollm2",
 ]
 
 TORCHTUNE_DEFINED_MODELS = ["llama3_2_vision"]
diff --git a/examples/models/smollm2/135M_config.json b/examples/models/smollm2/135M_config.json
new file mode 100644
index 00000000000..604c7e94ab5
--- /dev/null
+++ b/examples/models/smollm2/135M_config.json
@@ -0,0 +1,14 @@
+{
+  "dim": 576,
+  "ffn_dim_multiplier": 1,
+  "hidden_dim": 1536,
+  "n_heads": 9,
+  "n_kv_heads": 3,
+  "n_layers": 30,
+  "norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "use_scaled_rope": false,
+  "vocab_size": 49152,
+  "use_hf_rope": false,
+  "attention_qkv_bias": false
+  }
diff --git a/examples/models/smollm2/__init__ b/examples/models/smollm2/__init__
new file mode 100644
index 00000000000..3d01bf9eb42
--- /dev/null
+++ b/examples/models/smollm2/__init__
@@ -0,0 +1,14 @@
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.example.models.llama.model import Llama2Model
+
+
+class SmolLM2Model(Llama2Model):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+
+__all__ = [
+    "SmolLM2Model",
+]
diff --git a/examples/models/smollm2/convert_weights.py b/examples/models/smollm2/convert_weights.py
new file mode 100644
index 00000000000..db80bd47b8c
--- /dev/null
+++ b/examples/models/smollm2/convert_weights.py
@@ -0,0 +1,80 @@
+import argparse
+from typing import Dict
+
+import torch
+
+from torchtune.models.convert_weights import get_mapped_key
+
+from torchtune.training import FullModelHFCheckpointer
+
+# Standard _FROM_META weight mapping of Meta weights to TorchTune + additional bias weight mappings.
+_SMOLLM_FROM_META = {
+    "tok_embeddings.weight": "tok_embeddings.weight",
+    "norm.weight": "norm.scale",
+    "output.weight": "output.weight",
+    "layers.{}.attention.wk.weight": "layers.{}.attn.k_proj.weight",
+    "layers.{}.attention.wq.weight": "layers.{}.attn.q_proj.weight",
+    "layers.{}.attention.wv.weight": "layers.{}.attn.v_proj.weight",
+    "layers.{}.attention.wo.weight": "layers.{}.attn.output_proj.weight",
+    "layers.{}.attention_norm.weight": "layers.{}.sa_norm.scale",
+    "layers.{}.ffn_norm.weight": "layers.{}.mlp_norm.scale",
+    "layers.{}.feed_forward.w1.weight": "layers.{}.mlp.w1.weight",
+    "layers.{}.feed_forward.w2.weight": "layers.{}.mlp.w2.weight",
+    "layers.{}.feed_forward.w3.weight": "layers.{}.mlp.w3.weight",
+}
+
+
+def smollm_tune_to_meta(state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
+    """
+    Convert a state dict from torchtune's format to Meta's format. This function
+    doesn't handle any sharding or splitting of state dicts. It follows the
+    state_dict IN -> state_dict OUT pattern.
+
+    Args:
+        state_dict (Dict[str, torch.Tensor]): State dict in torchtune's format.
+
+    Returns:
+        Dict[str, torch.Tensor]: State dict in Meta's format.
+    """
+    converted_state_dict = {}
+    inverted_mapping_dict = {v: k for k, v in _SMOLLM_FROM_META.items()}
+    for key, value in state_dict.items():
+        new_key = get_mapped_key(key, inverted_mapping_dict)
+        converted_state_dict[new_key] = value
+
+    return converted_state_dict
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Convert SmolLM weights to Meta format."
+    )
+    parser.add_argument(
+        "input_dir",
+        type=str,
+        help="Path to directory containing checkpoint files",
+    )
+    parser.add_argument("output", type=str, help="Path to the output checkpoint")
+
+    args = parser.parse_args()
+
+    # Don't necessarily need to use TorchTune checkpointer, can just aggregate checkpoint files by ourselves.
+    checkpointer = FullModelHFCheckpointer(
+        checkpoint_dir=args.input_dir,
+        checkpoint_files=["model.safetensors"],
+        output_dir=".",
+        model_type="LLAMA",
+    )
+
+    print("Loading checkpoint...")
+    sd = checkpointer.load_checkpoint()
+
+    print("Converting checkpoint...")
+    sd = smollm_tune_to_meta(sd["model"])
+
+    torch.save(sd, args.output)
+    print(f"Checkpoint saved to {args.output}")
+
+
+if __name__ == "__main__":
+    main()

From d2757b870588aae1bfe7ca4c06fd633ffe087ae3 Mon Sep 17 00:00:00 2001
From: Jack <32371937+jackzhxng@users.noreply.github.com>
Date: Mon, 24 Mar 2025 06:57:48 -0700
Subject: [PATCH 2/2] Revert "Add SmolLM (smollm2) (#9354)"

This reverts commit d60173b03975be61073d65c09c8c5e56fcec5e39.
---
 examples/models/llama/export_llama_lib.py  |  1 -
 examples/models/smollm2/135M_config.json   | 14 ----
 examples/models/smollm2/__init__           | 14 ----
 examples/models/smollm2/convert_weights.py | 80 ----------------------
 4 files changed, 109 deletions(-)
 delete mode 100644 examples/models/smollm2/135M_config.json
 delete mode 100644 examples/models/smollm2/__init__
 delete mode 100644 examples/models/smollm2/convert_weights.py

diff --git a/examples/models/llama/export_llama_lib.py b/examples/models/llama/export_llama_lib.py
index cb887252571..ff2b82f6c65 100644
--- a/examples/models/llama/export_llama_lib.py
+++ b/examples/models/llama/export_llama_lib.py
@@ -95,7 +95,6 @@
     "static_llama",
     "qwen2_5",
     "phi-4-mini",
-    "smollm2",
 ]
 
 TORCHTUNE_DEFINED_MODELS = ["llama3_2_vision"]
diff --git a/examples/models/smollm2/135M_config.json b/examples/models/smollm2/135M_config.json
deleted file mode 100644
index 604c7e94ab5..00000000000
--- a/examples/models/smollm2/135M_config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "dim": 576,
-  "ffn_dim_multiplier": 1,
-  "hidden_dim": 1536,
-  "n_heads": 9,
-  "n_kv_heads": 3,
-  "n_layers": 30,
-  "norm_eps": 1e-05,
-  "rope_theta": 10000.0,
-  "use_scaled_rope": false,
-  "vocab_size": 49152,
-  "use_hf_rope": false,
-  "attention_qkv_bias": false
-  }
diff --git a/examples/models/smollm2/__init__ b/examples/models/smollm2/__init__
deleted file mode 100644
index 3d01bf9eb42..00000000000
--- a/examples/models/smollm2/__init__
+++ /dev/null
@@ -1,14 +0,0 @@
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-from executorch.example.models.llama.model import Llama2Model
-
-
-class SmolLM2Model(Llama2Model):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-
-__all__ = [
-    "SmolLM2Model",
-]
diff --git a/examples/models/smollm2/convert_weights.py b/examples/models/smollm2/convert_weights.py
deleted file mode 100644
index db80bd47b8c..00000000000
--- a/examples/models/smollm2/convert_weights.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import argparse
-from typing import Dict
-
-import torch
-
-from torchtune.models.convert_weights import get_mapped_key
-
-from torchtune.training import FullModelHFCheckpointer
-
-# Standard _FROM_META weight mapping of Meta weights to TorchTune + additional bias weight mappings.
-_SMOLLM_FROM_META = {
-    "tok_embeddings.weight": "tok_embeddings.weight",
-    "norm.weight": "norm.scale",
-    "output.weight": "output.weight",
-    "layers.{}.attention.wk.weight": "layers.{}.attn.k_proj.weight",
-    "layers.{}.attention.wq.weight": "layers.{}.attn.q_proj.weight",
-    "layers.{}.attention.wv.weight": "layers.{}.attn.v_proj.weight",
-    "layers.{}.attention.wo.weight": "layers.{}.attn.output_proj.weight",
-    "layers.{}.attention_norm.weight": "layers.{}.sa_norm.scale",
-    "layers.{}.ffn_norm.weight": "layers.{}.mlp_norm.scale",
-    "layers.{}.feed_forward.w1.weight": "layers.{}.mlp.w1.weight",
-    "layers.{}.feed_forward.w2.weight": "layers.{}.mlp.w2.weight",
-    "layers.{}.feed_forward.w3.weight": "layers.{}.mlp.w3.weight",
-}
-
-
-def smollm_tune_to_meta(state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
-    """
-    Convert a state dict from torchtune's format to Meta's format. This function
-    doesn't handle any sharding or splitting of state dicts. It follows the
-    state_dict IN -> state_dict OUT pattern.
-
-    Args:
-        state_dict (Dict[str, torch.Tensor]): State dict in torchtune's format.
-
-    Returns:
-        Dict[str, torch.Tensor]: State dict in Meta's format.
-    """
-    converted_state_dict = {}
-    inverted_mapping_dict = {v: k for k, v in _SMOLLM_FROM_META.items()}
-    for key, value in state_dict.items():
-        new_key = get_mapped_key(key, inverted_mapping_dict)
-        converted_state_dict[new_key] = value
-
-    return converted_state_dict
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Convert SmolLM weights to Meta format."
-    )
-    parser.add_argument(
-        "input_dir",
-        type=str,
-        help="Path to directory containing checkpoint files",
-    )
-    parser.add_argument("output", type=str, help="Path to the output checkpoint")
-
-    args = parser.parse_args()
-
-    # Don't necessarily need to use TorchTune checkpointer, can just aggregate checkpoint files by ourselves.
-    checkpointer = FullModelHFCheckpointer(
-        checkpoint_dir=args.input_dir,
-        checkpoint_files=["model.safetensors"],
-        output_dir=".",
-        model_type="LLAMA",
-    )
-
-    print("Loading checkpoint...")
-    sd = checkpointer.load_checkpoint()
-
-    print("Converting checkpoint...")
-    sd = smollm_tune_to_meta(sd["model"])
-
-    torch.save(sd, args.output)
-    print(f"Checkpoint saved to {args.output}")
-
-
-if __name__ == "__main__":
-    main()
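Note on the conversion pattern: convert_weights.py (added in PATCH 1/2 and reverted in PATCH 2/2) is driven entirely by the _SMOLLM_FROM_META table. smollm_tune_to_meta inverts that Meta -> torchtune mapping and renames each tensor key back to Meta's naming, following the state_dict IN -> state_dict OUT pattern described in its docstring. The sketch below is illustrative only and not part of the patches: it mirrors that renaming on a toy state dict so the pattern can be run without torchtune. _map_key is a simplified, hypothetical stand-in for torchtune's get_mapped_key (which handles the numbered "layers.{}" placeholders in the real script), and the toy tensor shapes are only for demonstration.

# Illustrative sketch only (not part of the patches above): mirrors the
# key-renaming pattern of smollm_tune_to_meta without requiring torchtune.
import re
from typing import Dict

import torch

# Subset of _SMOLLM_FROM_META: Meta key template -> torchtune key template.
_FROM_META = {
    "tok_embeddings.weight": "tok_embeddings.weight",
    "norm.weight": "norm.scale",
    "layers.{}.attention.wq.weight": "layers.{}.attn.q_proj.weight",
}


def _map_key(key: str, mapping: Dict[str, str]) -> str:
    # Simplified stand-in for torchtune.models.convert_weights.get_mapped_key:
    # swap the layer number for the "{}" placeholder, look up the mapped
    # template, then put the layer number back.
    match = re.search(r"layers\.(\d+)\.", key)
    if match is None:
        return mapping[key]
    layer = match.group(1)
    template = key.replace(f"layers.{layer}.", "layers.{}.", 1)
    return mapping[template].format(layer)


def tune_to_meta(state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    # Same state_dict IN -> state_dict OUT pattern as smollm_tune_to_meta:
    # invert the Meta -> torchtune table and rename one tensor at a time.
    inverted = {v: k for k, v in _FROM_META.items()}
    return {_map_key(k, inverted): v for k, v in state_dict.items()}


if __name__ == "__main__":
    # Toy torchtune-style state dict; shapes echo 135M_config.json
    # (dim=576, vocab_size=49152) but are purely illustrative.
    toy_sd = {
        "tok_embeddings.weight": torch.zeros(49152, 576),
        "norm.scale": torch.ones(576),
        "layers.0.attn.q_proj.weight": torch.zeros(576, 576),
    }
    for name, tensor in tune_to_meta(toy_sd).items():
        print(name, tuple(tensor.shape))
    # Prints Meta-style keys: tok_embeddings.weight, norm.weight,
    # layers.0.attention.wq.weight

With torchtune installed, the reverted script itself would be invoked as: python examples/models/smollm2/convert_weights.py <hf_checkpoint_dir> <output_path>, where the input directory is expected to contain model.safetensors; the converted state dict is then written with torch.save to the given output path.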