Skip to content

Commit eea0b04

Browse files
author
Mishig
authored
[widgets] Update test page (#648)
This PR updates the test page for widgets. The commit messages describe what each change does <img width="514" alt="image" src="https://github.com/huggingface/huggingface.js/assets/11827707/bbc4028c-c460-4744-9f00-2e8482778e89">
1 parent fc11648 commit eea0b04

File tree

2 files changed

+55
-9
lines changed

2 files changed

+55
-9
lines changed

packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,8 @@
5151
let inferenceClient: HfInference | undefined = undefined;
5252
let abort: AbortController | undefined = undefined;
5353
54+
$: inferenceClient = new HfInference(apiToken);
55+
5456
// Check config and compile template
5557
onMount(() => {
5658
const config = model.config;
@@ -84,8 +86,6 @@
8486
error = `Invalid chat template: "${(e as Error).message}"`;
8587
return;
8688
}
87-
88-
inferenceClient = new HfInference(apiToken);
8989
});
9090
9191
async function handleNewMessage(): Promise<void> {
@@ -165,6 +165,7 @@
165165
signal: abort?.signal,
166166
use_cache: useCache || !$isLoggedIn,
167167
wait_for_model: withModelLoading,
168+
retry_on_error: false,
168169
} satisfies Options;
169170
170171
tgiSupportedModels = await getTgiSupportedModels(apiUrl);

packages/widgets/src/routes/+page.svelte

Lines changed: 52 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -28,25 +28,70 @@
2828
apiToken = token;
2929
}
3030
}
31+
32+
isLoggedIn.set(true);
3133
});
3234
3335
const models: ModelData[] = [
3436
{
35-
id: "mistralai/Mistral-7B-Instruct-v0.2",
37+
id: "meta-llama/Meta-Llama-3-8B-Instruct",
3638
pipeline_tag: "text-generation",
3739
tags: ["conversational"],
3840
inference: InferenceDisplayability.Yes,
3941
config: {
40-
architectures: ["MistralForCausalLM"],
41-
model_type: "mistral",
42+
architectures: ["LlamaForCausalLM"],
43+
model_type: "llama",
4244
tokenizer_config: {
4345
chat_template:
44-
"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
45-
use_default_system_prompt: false,
46+
"{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
47+
bos_token: "<|begin_of_text|>",
48+
eos_token: "<|end_of_text|>",
49+
},
50+
},
51+
widgetData: [
52+
{ text: "This is a text-only example", example_title: "Text only" },
53+
{
54+
messages: [{ content: "Please explain QCD in very few words", role: "user" }],
55+
example_title: "Chat messages",
56+
},
57+
{
58+
messages: [{ content: "Please explain QCD in very few words", role: "user" }],
59+
output: {
60+
text: "QCD is the physics of strong force and small particles.",
61+
},
62+
example_title: "Chat messages with Output",
63+
},
64+
{
65+
text: "Explain QCD in one short sentence.",
66+
output: {
67+
text: "QCD is the physics of strong force and small particles.",
68+
},
69+
example_title: "Text only with Output",
70+
},
71+
{
72+
example_title: "Invalid example - unsupported role",
73+
messages: [
74+
{ role: "system", content: "This will fail because of the chat template" },
75+
{ role: "user", content: "What's your favorite condiment?" },
76+
],
77+
},
78+
],
79+
},
80+
{
81+
id: "microsoft/Phi-3-mini-128k-instruct",
82+
pipeline_tag: "text-generation",
83+
tags: ["conversational"],
84+
inference: InferenceDisplayability.Yes,
85+
config: {
86+
architectures: ["Phi3ForCausalLM"],
87+
model_type: "phi3",
88+
tokenizer_config: {
4689
bos_token: "<s>",
47-
eos_token: "</s>",
90+
chat_template:
91+
"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
92+
eos_token: "<|endoftext|>",
93+
pad_token: "<|endoftext|>",
4894
unk_token: "<unk>",
49-
pad_token: null,
5095
},
5196
},
5297
widgetData: [

0 commit comments

Comments
 (0)