|
28 | 28 | apiToken = token; |
29 | 29 | } |
30 | 30 | } |
| 31 | +
|
| 32 | + isLoggedIn.set(true); |
31 | 33 | }); |
32 | 34 |
|
33 | 35 | const models: ModelData[] = [ |
34 | 36 | { |
35 | | - id: "mistralai/Mistral-7B-Instruct-v0.2", |
| 37 | + id: "meta-llama/Meta-Llama-3-8B-Instruct", |
36 | 38 | pipeline_tag: "text-generation", |
37 | 39 | tags: ["conversational"], |
38 | 40 | inference: InferenceDisplayability.Yes, |
39 | 41 | config: { |
40 | | - architectures: ["MistralForCausalLM"], |
41 | | - model_type: "mistral", |
| 42 | + architectures: ["LlamaForCausalLM"], |
| 43 | + model_type: "llama", |
42 | 44 | tokenizer_config: { |
43 | 45 | chat_template: |
44 | | - "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", |
45 | | - use_default_system_prompt: false, |
| 46 | + "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}", |
| 47 | + bos_token: "<|begin_of_text|>", |
| 48 | + eos_token: "<|end_of_text|>", |
| 49 | + }, |
| 50 | + }, |
| 51 | + widgetData: [ |
| 52 | + { text: "This is a text-only example", example_title: "Text only" }, |
| 53 | + { |
|    | 54 | +      messages: [{ content: "Please explain QCD in very few words", role: "user" }],
| 55 | + example_title: "Chat messages", |
| 56 | + }, |
| 57 | + { |
|    | 58 | +      messages: [{ content: "Please explain QCD in very few words", role: "user" }],
| 59 | + output: { |
| 60 | + text: "QCD is the physics of strong force and small particles.", |
| 61 | + }, |
| 62 | + example_title: "Chat messages with Output", |
| 63 | + }, |
| 64 | + { |
| 65 | + text: "Explain QCD in one short sentence.", |
| 66 | + output: { |
| 67 | + text: "QCD is the physics of strong force and small particles.", |
| 68 | + }, |
| 69 | + example_title: "Text only with Output", |
| 70 | + }, |
| 71 | + { |
| 72 | + example_title: "Invalid example - unsupported role", |
| 73 | + messages: [ |
| 74 | + { role: "system", content: "This will fail because of the chat template" }, |
| 75 | + { role: "user", content: "What's your favorite condiment?" }, |
| 76 | + ], |
| 77 | + }, |
| 78 | + ], |
| 79 | + }, |
| 80 | + { |
| 81 | + id: "microsoft/Phi-3-mini-128k-instruct", |
| 82 | + pipeline_tag: "text-generation", |
| 83 | + tags: ["conversational"], |
| 84 | + inference: InferenceDisplayability.Yes, |
| 85 | + config: { |
| 86 | + architectures: ["Phi3ForCausalLM"], |
| 87 | + model_type: "phi3", |
| 88 | + tokenizer_config: { |
46 | 89 | bos_token: "<s>", |
47 | | - eos_token: "</s>", |
| 90 | + chat_template: |
| 91 | + "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}", |
| 92 | + eos_token: "<|endoftext|>", |
| 93 | + pad_token: "<|endoftext|>", |
48 | 94 | unk_token: "<unk>", |
49 | | - pad_token: null, |
50 | 95 | }, |
51 | 96 | }, |
52 | 97 | widgetData: [ |
|
0 commit comments