Skip to content

Commit 91fffb0

Browse files
committed
separate snippets
1 parent 435a3fe commit 91fffb0

File tree

4 files changed

+30
-45
lines changed

4 files changed

+30
-45
lines changed

docs/api-inference/tasks/chat-completion.md

Lines changed: 8 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,6 @@ curl 'https://api-inference.huggingface.co/models/google/gemma-2-2b-it/v1/chat/c
8080

8181
<python>
8282
```py
83-
# With huggingface_hub client
84-
8583
from huggingface_hub import InferenceClient
8684

8785
client = InferenceClient(api_key="hf_***")
@@ -102,9 +100,9 @@ stream = client.chat.completions.create(
102100

103101
for chunk in stream:
104102
print(chunk.choices[0].delta.content, end="")
103+
```
105104

106-
# With openai client
107-
105+
```py
108106
from openai import OpenAI
109107

110108
client = OpenAI(
@@ -135,8 +133,6 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
135133

136134
<js>
137135
```js
138-
// With huggingface_hub client
139-
140136
import { HfInference } from "@huggingface/inference"
141137

142138
const client = new HfInference("hf_***")
@@ -161,9 +157,9 @@ for await (const chunk of stream) {
161157
console.log(newContent);
162158
}
163159
}
160+
```
164161

165-
// With openai client
166-
162+
```js
167163
import { OpenAI } from "openai"
168164

169165
const client = new OpenAI({
@@ -238,8 +234,6 @@ curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Visio
238234

239235
<python>
240236
```py
241-
# With huggingface_hub client
242-
243237
from huggingface_hub import InferenceClient
244238

245239
client = InferenceClient(api_key="hf_***")
@@ -271,9 +265,9 @@ stream = client.chat.completions.create(
271265

272266
for chunk in stream:
273267
print(chunk.choices[0].delta.content, end="")
268+
```
274269

275-
# With openai client
276-
270+
```py
277271
from openai import OpenAI
278272

279273
client = OpenAI(
@@ -315,8 +309,6 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
315309

316310
<js>
317311
```js
318-
// With huggingface_hub client
319-
320312
import { HfInference } from "@huggingface/inference"
321313

322314
const client = new HfInference("hf_***")
@@ -352,9 +344,9 @@ for await (const chunk of stream) {
352344
console.log(newContent);
353345
}
354346
}
347+
```
355348

356-
// With openai client
357-
349+
```js
358350
import { OpenAI } from "openai"
359351

360352
const client = new OpenAI({

docs/api-inference/tasks/image-text-to-text.md

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,6 @@ curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision
4646

4747
<python>
4848
```py
49-
# With huggingface_hub client
50-
5149
import requests
5250

5351
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
@@ -68,9 +66,9 @@ stream = client.chat.completions.create(
6866

6967
for chunk in stream:
7068
print(chunk.choices[0].delta.content, end="")
69+
```
7170

72-
# With openai client
73-
71+
```py
7472
import requests
7573

7674
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"

scripts/api-inference/scripts/generate.ts

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -103,20 +103,13 @@ const formatSnippets = (result: snippets.types.InferenceSnippet | snippets.types
103103
// For single snippet, return just the content
104104
if (!Array.isArray(result) || result.length === 1) {
105105
const snippet = Array.isArray(result) ? result[0] : result;
106-
return snippet.content;
106+
return `\`\`\`${language}\n${snippet.content}\n\`\`\``;
107107
}
108108

109-
// Get the appropriate comment
110-
const commentPrefix = {
111-
'py': '#',
112-
'js': '//',
113-
'bash': '#'
114-
}[language] || '#';
115-
116-
// Show the snippet for each client
109+
// For multiple snippets, wrap each one in its own code block
117110
return result
118111
.map(snippet =>
119-
`${commentPrefix} With ${snippet.client || defaultClient} client\n\n${snippet.content}`
112+
`\`\`\`${language}\n${snippet.content}\n\`\`\``
120113
)
121114
.join('\n\n');
122115
};
@@ -148,16 +141,14 @@ export function getInferenceSnippet(
148141
id: string,
149142
pipeline_tag: PipelineType,
150143
language: InferenceSnippetLanguage,
151-
config?: JsonObject,
152-
tags?: string[],
153144
): string | undefined {
154145
const modelData = {
155146
id,
156147
pipeline_tag,
157148
mask_token: "[MASK]",
158149
library_name: "",
159-
config: config ?? {},
160-
tags: tags ?? [],
150+
config: {},
151+
tags: [],
161152
};
162153
// @ts-ignore
163154
if (HAS_SNIPPET_FN[language](modelData)) {
@@ -507,15 +498,25 @@ function fetchChatCompletion() {
507498
);
508499

509500
const mainModel = DATA.models[task.name][0];
501+
const mainModelData = {
502+
// @ts-ignore
503+
id: mainModel.id,
504+
pipeline_tag: task.pipelineTag,
505+
mask_token: "",
506+
library_name: "",
507+
// @ts-ignore
508+
tags: ["conversational"],
509+
// @ts-ignore
510+
config: mainModel.config,
511+
};
510512
const taskSnippets = {
511513
// @ts-ignore
512-
curl: getInferenceSnippet(mainModel.id, task.pipelineTag, "curl", mainModel.config, ["conversational"]),
514+
curl: GET_SNIPPET_FN["curl"](mainModelData, "hf_***"),
513515
// @ts-ignore
514-
python: getInferenceSnippet(mainModel.id, task.pipelineTag, "python", mainModel.config, ["conversational"]),
516+
python: GET_SNIPPET_FN["python"](mainModelData, "hf_***"),
515517
// @ts-ignore
516-
javascript: getInferenceSnippet(mainModel.id, task.pipelineTag, "js", mainModel.config, ["conversational"]),
518+
javascript: GET_SNIPPET_FN["js"](mainModelData, "hf_***"),
517519
};
518-
console.log(taskSnippets);
519520
DATA.snippets[task.name] = SNIPPETS_TEMPLATE({
520521
taskSnippets,
521522
taskSnakeCase: baseName.replace("-", "_"),
@@ -548,4 +549,4 @@ await Promise.all(
548549
}),
549550
);
550551

551-
console.log("✅ All done!");
552+
console.log("✅ All done!");

scripts/api-inference/templates/common/snippets-template.handlebars

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,18 +5,14 @@
55
{{!-- cURL snippet (if exists) --}}
66
{{#if taskSnippets.curl}}
77
<curl>
8-
```bash
98
{{{taskSnippets.curl}}}
10-
```
119
</curl>
1210
{{/if}}
1311

1412
{{!-- Python snippet (if exists) --}}
1513
{{#if taskSnippets.python}}
1614
<python>
17-
```py
1815
{{{taskSnippets.python}}}
19-
```
2016

2117
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.{{taskSnakeCase}}).
2218
</python>
@@ -25,9 +21,7 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
2521
{{!-- JavaScript snippet (if exists) --}}
2622
{{#if taskSnippets.javascript}}
2723
<js>
28-
```js
2924
{{{taskSnippets.javascript}}}
30-
```
3125

3226
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#{{taskAttached}}).
3327
</js>

0 commit comments

Comments (0)