1 file changed: 6 additions, 1 deletion.
```diff
@@ -17,7 +17,12 @@ def get_tokenizer(
 ) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
     """Gets a tokenizer for the given model name via Huggingface."""
     config = AutoConfig.from_pretrained(model_name)
-    if config.model_type == "llama" and getattr(kwargs, "use_fast", True):
+    if "open_llama" in model_name:
+        kwargs["use_fast"] = False
+        logger.info(
+            "OpenLLaMA models do not support the fast tokenizer. "
+            "Using the slow tokenizer instead.")
+    elif config.model_type == "llama" and getattr(kwargs, "use_fast", True):
         # LLaMA fast tokenizer causes protobuf errors in some environments.
         # However, we found that the below LLaMA fast tokenizer works well in
         # most environments.
```
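The sketch below shows how the patched function resolves a tokenizer once the hunk is applied: OpenLLaMA model names are forced onto the slow tokenizer, other LLaMA models keep their existing fast-tokenizer handling. This is a minimal reconstruction, not the repository's full code: the `logging` setup, the `*args`/`**kwargs` signature, the body of the `elif` branch, and the final `AutoTokenizer.from_pretrained` call are assumptions, since only this hunk appears in the diff. One deliberate change: the diff's context line uses `getattr(kwargs, "use_fast", True)`, which on a plain keyword-argument dict never finds the key and always falls back to `True`; the dict equivalent `kwargs.get("use_fast", True)` is used here instead.

```python
import logging
from typing import Union

from transformers import (AutoConfig, AutoTokenizer, PreTrainedTokenizer,
                          PreTrainedTokenizerFast)

logger = logging.getLogger(__name__)


def get_tokenizer(
    model_name: str,
    *args,
    **kwargs,
) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
    """Gets a tokenizer for the given model name via Huggingface."""
    config = AutoConfig.from_pretrained(model_name)
    if "open_llama" in model_name:
        # OpenLLaMA checkpoints do not work with the fast (Rust) tokenizer,
        # so force the slow sentencepiece-based one.
        kwargs["use_fast"] = False
        logger.info("OpenLLaMA models do not support the fast tokenizer. "
                    "Using the slow tokenizer instead.")
    elif config.model_type == "llama" and kwargs.get("use_fast", True):
        # The LLaMA fast tokenizer causes protobuf errors in some
        # environments but otherwise works well; the body of this branch is
        # not shown in the diff, so it is elided here.
        pass
    return AutoTokenizer.from_pretrained(model_name, *args, **kwargs)
```

With this logic, a call such as `get_tokenizer("openlm-research/open_llama_7b")` takes the first branch because the model name contains "open_llama", while other LLaMA-family models fall through to the existing fast-tokenizer path.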