@@ -27,20 +27,24 @@ await uploadFile({
2727 }
2828});
2929
30- // Use hosted inference
31-
32- await inference.translation({
33-   model: 't5-base',
34-   inputs: 'My name is Wolfgang and I live in Berlin'
35- })
30+ // Use Inference API
31+
32+ await inference.chatCompletion({
33+   model: "meta-llama/Llama-3.1-8B-Instruct",
34+   messages: [
35+     {
36+       role: "user",
37+       content: "Hello, nice to meet you!",
38+     },
39+   ],
40+   max_tokens: 512,
41+   temperature: 0.5,
42+ });
3643
37 44 await inference.textToImage({
38-   model: 'stabilityai/stable-diffusion-2',
39-   inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
40-   parameters: {
41-     negative_prompt: 'blurry',
42-   }
43- })
45+   model: "black-forest-labs/FLUX.1-dev",
46+   inputs: "a picture of a green bird",
47+ });
4448
4549// and much more…
4650```
@@ -123,33 +127,33 @@ const inference = new HfInference(HF_TOKEN);
123127
124 128 // Chat completion API
125 129 const out = await inference.chatCompletion({
126-   model: "mistralai/Mistral-7B-Instruct-v0.2",
127-   messages: [{ role: "user", content: "Complete the this sentence with words one plus one is equal" }],
128-   max_tokens: 100
130+   model: "meta-llama/Llama-3.1-8B-Instruct",
131+   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
132+   max_tokens: 512
129 133 });
130 134 console.log(out.choices[0].message);
131 135
132 136 // Streaming chat completion API
133 137 for await (const chunk of inference.chatCompletionStream({
134-   model: "mistralai/Mistral-7B-Instruct-v0.2",
135-   messages: [{ role: "user", content: "Complete the this sentence with words one plus one is equal" }],
136-   max_tokens: 100
138+   model: "meta-llama/Llama-3.1-8B-Instruct",
139+   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
140+   max_tokens: 512
137 141 })) {
138 142   console.log(chunk.choices[0].delta.content);
139 143 }
140144
141 145 // You can also omit "model" to use the recommended model for the task
142 146 await inference.translation({
143-   model: 't5-base',
144-   inputs: 'My name is Wolfgang and I live in Amsterdam'
145- })
147+   inputs: "My name is Wolfgang and I live in Amsterdam",
148+   parameters: {
149+     src_lang: "en",
150+     tgt_lang: "fr",
151+   },
152+ });
146153
147 154 await inference.textToImage({
148-   model: 'stabilityai/stable-diffusion-2',
149-   inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
150-   parameters: {
151-     negative_prompt: 'blurry',
152-   }
155+   model: 'black-forest-labs/FLUX.1-dev',
156+   inputs: 'a picture of a green bird',
153 157 })
154158
155159await inference .imageToText ({
@@ -162,13 +166,13 @@ const gpt2 = inference.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface
162 166 const { generated_text } = await gpt2.textGeneration({ inputs: 'The answer to the universe is' });
163167
164 168 // Chat Completion
165- const mistal = inference.endpoint(
166-   "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
169+ const llamaEndpoint = inference.endpoint(
170+   "https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct"
167 171 );
168- const out = await mistal.chatCompletion({
169-   model: "mistralai/Mistral-7B-Instruct-v0.2",
170-   messages: [{ role: "user", content: "Complete the this sentence with words one plus one is equal" }],
171-   max_tokens: 100,
172+ const out = await llamaEndpoint.chatCompletion({
173+   model: "meta-llama/Llama-3.1-8B-Instruct",
174+   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
175+   max_tokens: 512,
172 176 });
173 177 console.log(out.choices[0].message);
174178```
0 commit comments