Commit ba6b5d2

Merge pull request #198 from ngquangtrung57/fix-llama
Add safe load tokenizer for llama_3
2 parents 5fbcf27 + 7315dc1 commit ba6b5d2

File tree: 1 file changed (+9 −1 lines changed)

llava/conversation.py

Lines changed: 9 additions & 1 deletion
@@ -95,6 +95,8 @@ def get_prompt(self):
             return ret

         elif self.sep_style == SeparatorStyle.LLAMA_3:
+            if self.tokenizer is None:
+                raise ValueError("Llama 3 tokenizer is not available. Make sure you have the necessary permissions.")
             chat_template_messages = [{"role": "system", "content": self.system}]
             for role, message in messages:
                 if message:
@@ -375,6 +377,12 @@ def dict(self):
     sep2="</s>",
 )

+def safe_load_tokenizer(tokenizer_id):
+    try:
+        return AutoTokenizer.from_pretrained(tokenizer_id)
+    except Exception:
+        return None
+
 conv_llava_llama_3 = Conversation(
     system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
     roles=("user", "assistant"),
@@ -384,7 +392,7 @@ def dict(self):
     sep="<|eot_id|>",
     sep_style=SeparatorStyle.LLAMA_3,
     tokenizer_id="meta-llama/Meta-Llama-3-8B-Instruct",
-    tokenizer=AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct"),
+    tokenizer=safe_load_tokenizer("meta-llama/Meta-Llama-3-8B-Instruct"),
     stop_token_ids=[128009],
 )

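In effect, the module previously called AutoTokenizer.from_pretrained on the gated meta-llama/Meta-Llama-3-8B-Instruct repo at import time, so users without access to that checkpoint could not import llava.conversation at all; after this commit the failure is deferred until a LLAMA_3-style prompt is actually built. A minimal sketch of the new behavior, assuming the transformers package is installed (the helper is reproduced here for illustration rather than imported from llava.conversation):

# Sketch of the fallback added in this commit.
from transformers import AutoTokenizer


def safe_load_tokenizer(tokenizer_id):
    # Swallow gated-repo / network errors and return None instead of raising,
    # so importing the conversation module no longer requires Llama 3 access.
    try:
        return AutoTokenizer.from_pretrained(tokenizer_id)
    except Exception:
        return None


tokenizer = safe_load_tokenizer("meta-llama/Meta-Llama-3-8B-Instruct")
if tokenizer is None:
    # Without the tokenizer, get_prompt() on a LLAMA_3 conversation now raises
    # the explicit ValueError above instead of the whole module failing to load.
    print("Llama 3 tokenizer unavailable; request access on Hugging Face to use LLAMA_3 prompts.")
else:
    print("Loaded tokenizer:", tokenizer.name_or_path)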

0 commit comments
