Skip to content

Commit 0e54f56

Browse files
KeitaWmreso
authored and committed
use AutoTokenizer instead of LlamaTokenizer
1 parent 6da989a commit 0e54f56

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

src/llama_recipes/inference/checkpoint_converter_fsdp_hf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
import sys
99
import yaml
1010

11-
from transformers import LlamaTokenizer
11+
from transformers import AutoTokenizer
1212

1313
from llama_recipes.inference.model_utils import load_llama_from_config
1414

@@ -56,7 +56,7 @@ def main(
5656
model = load_sharded_model_single_gpu(model_def, fsdp_checkpoint_path)
5757
print("model is loaded from FSDP checkpoints")
5858
#loading the tokenizer from the model_path
59-
tokenizer = LlamaTokenizer.from_pretrained(HF_model_path_or_name)
59+
tokenizer = AutoTokenizer.from_pretrained(HF_model_path_or_name)
6060
tokenizer.save_pretrained(consolidated_model_path)
6161
#save the FSDP sharded checkpoints in HF format
6262
model.save_pretrained(consolidated_model_path)

0 commit comments

Comments
 (0)