Skip to content

Commit 4c01d39

Browse files
authored
[Inference doc] Next gen inference snippets (#1643)
* more advanced
* better now? :)
* update
* big step
* better?
* typo
1 parent 2f3649e commit 4c01d39

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

41 files changed

+350
-2000
lines changed

docs/inference-providers/tasks/audio-classification.md

Lines changed: 1 addition & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -38,64 +38,7 @@ Explore all available models and find the one that suits you best [here](https:/
3838
### Using the API
3939

4040

41-
<inferencesnippet>
42-
43-
<curl>
44-
```bash
45-
curl https://router.huggingface.co/hf-inference/models/speechbrain/google_speech_command_xvector \
46-
-X POST \
47-
--data-binary '@sample1.flac' \
48-
-H 'Authorization: Bearer hf_***'
49-
```
50-
</curl>
51-
52-
<python>
53-
```py
54-
import requests
55-
56-
API_URL = "https://router.huggingface.co/hf-inference/v1"
57-
headers = {"Authorization": "Bearer hf_***"}
58-
59-
def query(filename):
60-
with open(filename, "rb") as f:
61-
data = f.read()
62-
response = requests.post(API_URL, headers=headers, data=data)
63-
return response.json()
64-
65-
output = query("sample1.flac")
66-
```
67-
68-
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.audio_classification).
69-
</python>
70-
71-
<js>
72-
```js
73-
async function query(filename) {
74-
const data = fs.readFileSync(filename);
75-
const response = await fetch(
76-
"https://router.huggingface.co/hf-inference/models/speechbrain/google_speech_command_xvector",
77-
{
78-
headers: {
79-
Authorization: "Bearer hf_***",
80-
"Content-Type": "application/json",
81-
},
82-
method: "POST",
83-
body: data,
84-
}
85-
);
86-
const result = await response.json();
87-
return result;
88-
}
89-
90-
query("sample1.flac").then((response) => {
91-
console.log(JSON.stringify(response));
92-
});
93-
```
94-
95-
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#audioclassification).
96-
</js>
97-
98-
</inferencesnippet>
41+
No snippet available for this task.
9942

10043

10144

docs/inference-providers/tasks/automatic-speech-recognition.md

Lines changed: 4 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -37,83 +37,10 @@ Explore all available models and find the one that suits you best [here](https:/
3737
### Using the API
3838

3939

40-
<inferencesnippet>
41-
42-
<curl>
43-
```bash
44-
curl https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3 \
45-
-X POST \
46-
--data-binary '@sample1.flac' \
47-
-H 'Authorization: Bearer hf_***'
48-
```
49-
</curl>
50-
51-
<python>
52-
```py
53-
import requests
54-
55-
API_URL = "https://router.huggingface.co/hf-inference/v1"
56-
headers = {"Authorization": "Bearer hf_***"}
57-
58-
def query(filename):
59-
with open(filename, "rb") as f:
60-
data = f.read()
61-
response = requests.post(API_URL, headers=headers, data=data)
62-
return response.json()
63-
64-
output = query("sample1.flac")
65-
```
66-
67-
To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.automatic_speech_recognition).
68-
</python>
69-
70-
<js>
71-
Using `huggingface.js`:
72-
```js
73-
import { HfInference } from "@huggingface/inference";
74-
75-
const client = new HfInference("hf_***");
76-
77-
const data = fs.readFileSync("sample1.flac");
78-
79-
const output = await client.automaticSpeechRecognition({
80-
data,
81-
model: "openai/whisper-large-v3",
82-
provider: "hf-inference",
83-
});
84-
85-
console.log(output);
86-
87-
```
88-
89-
Using `fetch`:
90-
```js
91-
async function query(filename) {
92-
const data = fs.readFileSync(filename);
93-
const response = await fetch(
94-
"https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3",
95-
{
96-
headers: {
97-
Authorization: "Bearer hf_***",
98-
"Content-Type": "application/json",
99-
},
100-
method: "POST",
101-
body: data,
102-
}
103-
);
104-
const result = await response.json();
105-
return result;
106-
}
107-
108-
query("sample1.flac").then((response) => {
109-
console.log(JSON.stringify(response));
110-
});
111-
```
112-
113-
To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#automaticspeechrecognition).
114-
</js>
115-
116-
</inferencesnippet>
40+
<InferenceSnippet
41+
pipeline=automatic-speech-recognition
42+
providersMapping={ {"fal-ai":{"modelId":"openai/whisper-large-v3","providerModelId":"fal-ai/whisper"},"hf-inference":{"modelId":"openai/whisper-large-v3-turbo","providerModelId":"openai/whisper-large-v3-turbo"}} }
43+
/>
11744

11845

11946

0 commit comments

Comments (0)