Skip to content

Commit 7f9643c

Browse files
committed
fix snippets
1 parent a037d93 commit 7f9643c

File tree

4 files changed

+20
-24
lines changed

4 files changed

+20
-24
lines changed

docs/api-inference/tasks/chat-completion.md

Lines changed: 8 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,8 @@ curl 'https://api-inference.huggingface.co/models/google/gemma-2-2b-it/v1/chat/c
7979
</curl>
8080

8181
<python>
82-
<huggingface_hub>
8382
```py
83+
<huggingface_hub>
8484
from huggingface_hub import InferenceClient
8585

8686
client = InferenceClient(api_key="hf_***")
@@ -101,11 +101,9 @@ stream = client.chat.completions.create(
101101

102102
for chunk in stream:
103103
print(chunk.choices[0].delta.content, end="")
104-
```
105104
</huggingface_hub>
106105

107106
<openai>
108-
```py
109107
from openai import OpenAI
110108

111109
client = OpenAI(
@@ -129,15 +127,15 @@ stream = client.chat.completions.create(
129127

130128
for chunk in stream:
131129
print(chunk.choices[0].delta.content, end="")
132-
```
133130
</openai>
131+
```
134132

135133
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion).
136134
</python>
137135

138136
<js>
139-
<huggingface_hub>
140137
```js
138+
<huggingface_hub>
141139
import { HfInference } from "@huggingface/inference"
142140

143141
const client = new HfInference("hf_***")
@@ -162,11 +160,9 @@ for await (const chunk of stream) {
162160
console.log(newContent);
163161
}
164162
}
165-
```
166163
</huggingface_hub>
167164

168165
<openai>
169-
```js
170166
import { OpenAI } from "openai"
171167

172168
const client = new OpenAI({
@@ -195,8 +191,8 @@ for await (const chunk of stream) {
195191
console.log(newContent);
196192
}
197193
}
198-
```
199194
</openai>
195+
```
200196

201197
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#chatcompletion).
202198
</js>
@@ -241,8 +237,8 @@ curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Visio
241237
</curl>
242238

243239
<python>
244-
<huggingface_hub>
245240
```py
241+
<huggingface_hub>
246242
from huggingface_hub import InferenceClient
247243

248244
client = InferenceClient(api_key="hf_***")
@@ -274,11 +270,9 @@ stream = client.chat.completions.create(
274270

275271
for chunk in stream:
276272
print(chunk.choices[0].delta.content, end="")
277-
```
278273
</huggingface_hub>
279274

280275
<openai>
281-
```py
282276
from openai import OpenAI
283277

284278
client = OpenAI(
@@ -313,15 +307,15 @@ stream = client.chat.completions.create(
313307

314308
for chunk in stream:
315309
print(chunk.choices[0].delta.content, end="")
316-
```
317310
</openai>
311+
```
318312

319313
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion).
320314
</python>
321315

322316
<js>
323-
<huggingface_hub>
324317
```js
318+
<huggingface_hub>
325319
import { HfInference } from "@huggingface/inference"
326320

327321
const client = new HfInference("hf_***")
@@ -357,11 +351,9 @@ for await (const chunk of stream) {
357351
console.log(newContent);
358352
}
359353
}
360-
```
361354
</huggingface_hub>
362355

363356
<openai>
364-
```js
365357
import { OpenAI } from "openai"
366358

367359
const client = new OpenAI({
@@ -401,8 +393,8 @@ for await (const chunk of stream) {
401393
console.log(newContent);
402394
}
403395
}
404-
```
405396
</openai>
397+
```
406398

407399
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#chatcompletion).
408400
</js>

docs/api-inference/tasks/image-text-to-text.md

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,8 +45,8 @@ curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision
4545
</curl>
4646

4747
<python>
48-
<huggingface_hub>
4948
```py
49+
<huggingface_hub>
5050
import requests
5151

5252
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
@@ -67,11 +67,9 @@ stream = client.chat.completions.create(
6767

6868
for chunk in stream:
6969
print(chunk.choices[0].delta.content, end="")
70-
```
7170
</huggingface_hub>
7271

7372
<openai>
74-
```py
7573
import requests
7674

7775
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
@@ -95,8 +93,8 @@ stream = client.chat.completions.create(
9593

9694
for chunk in stream:
9795
print(chunk.choices[0].delta.content, end="")
98-
```
9996
</openai>
97+
```
10098

10199
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.image_text-to-text).
102100
</python>

scripts/api-inference/scripts/generate.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -100,16 +100,16 @@ const TASKS_DATA = (await response.json()) as any;
100100
///////////////////////
101101

102102
const formatSnippets = (result: snippets.types.InferenceSnippet | snippets.types.InferenceSnippet[], defaultClient: string, language: string): string => {
103-
// For single snippet, return just the content (let the template handle the wrapping)
103+
// For single snippet, return just the content
104104
if (!Array.isArray(result) || result.length === 1) {
105105
const snippet = Array.isArray(result) ? result[0] : result;
106-
return `\`\`\`${language}\n${snippet.content}\n\`\`\``;
106+
return snippet.content;
107107
}
108108

109-
// For multiple snippets, include the client tags
109+
// For multiple snippets, return just the content with client info
110110
return result
111111
.map(snippet =>
112-
`<${snippet.client || defaultClient}>\n\`\`\`${language}\n${snippet.content}\n\`\`\`\n</${snippet.client || defaultClient}>`
112+
`<${snippet.client || defaultClient}>\n${snippet.content}\n</${snippet.client || defaultClient}>`
113113
)
114114
.join('\n\n');
115115
};

scripts/api-inference/templates/common/snippets-template.handlebars

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,18 @@
55
{{!-- cURL snippet (if exists) --}}
66
{{#if taskSnippets.curl}}
77
<curl>
8+
```bash
89
{{{taskSnippets.curl}}}
10+
```
911
</curl>
1012
{{/if}}
1113

1214
{{!-- Python snippet (if exists) --}}
1315
{{#if taskSnippets.python}}
1416
<python>
17+
```py
1518
{{{taskSnippets.python}}}
19+
```
1620

1721
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.{{taskSnakeCase}}).
1822
</python>
@@ -21,7 +25,9 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
2125
{{!-- JavaScript snippet (if exists) --}}
2226
{{#if taskSnippets.javascript}}
2327
<js>
28+
```js
2429
{{{taskSnippets.javascript}}}
30+
```
2531

2632
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#{{taskAttached}}).
2733
</js>

0 commit comments

Comments (0)