Commit d8132f5

Update Inference Providers documentation (automated) (#2054)
Co-authored-by: Wauplin <[email protected]>
1 parent f6aa18a · commit d8132f5

9 files changed: +14 additions, -14 deletions

docs/inference-providers/providers/featherless-ai.md

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"featherless-ai":{"modelId":"inclusionAI/Ling-1T","providerModelId":"inclusionAI/Ling-1T"} } }
+providersMapping={ {"featherless-ai":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Meta-Llama-3.1-8B-Instruct"} } }
 conversational />
 
 
@@ -69,6 +69,6 @@ Find out more about Text Generation [here](../tasks/text_generation).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"featherless-ai":{"modelId":"inclusionAI/Ling-1T","providerModelId":"inclusionAI/Ling-1T"} } }
+providersMapping={ {"featherless-ai":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Meta-Llama-3.1-8B-Instruct"} } }
 />
 
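For context, the snippet rendered from the updated featherless-ai mapping corresponds roughly to the following call (a minimal sketch using `huggingface_hub`'s `InferenceClient`, not the generated snippet itself; the `HF_TOKEN` environment variable and the prompt are placeholder assumptions):

```python
import os
from huggingface_hub import InferenceClient

# Route the request through Inference Providers to Featherless AI.
client = InferenceClient(
    provider="featherless-ai",
    api_key=os.environ["HF_TOKEN"],  # assumes a Hugging Face token in HF_TOKEN
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # Hub modelId; resolved to Meta-Llama-3.1-8B-Instruct on the provider side
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(completion.choices[0].message.content)
```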

docs/inference-providers/providers/fireworks-ai.md

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"fireworks-ai":{"modelId":"MiniMaxAI/MiniMax-M2","providerModelId":"accounts/fireworks/models/minimax-m2"} } }
+providersMapping={ {"fireworks-ai":{"modelId":"openai/gpt-oss-20b","providerModelId":"accounts/fireworks/models/gpt-oss-20b"} } }
 conversational />
 
 
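The Fireworks AI example translates to the same client pattern; a rough sketch with streaming shown to illustrate the conversational snippet (prompt and `HF_TOKEN` are assumptions, not part of this commit):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="fireworks-ai", api_key=os.environ["HF_TOKEN"])

# Stream tokens from the new default example model for this provider page.
stream = client.chat.completions.create(
    model="openai/gpt-oss-20b",  # mapped to accounts/fireworks/models/gpt-oss-20b on Fireworks
    messages=[{"role": "user", "content": "Summarize what an inference provider is."}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```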

docs/inference-providers/providers/replicate.md

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ Find out more about Image To Image [here](../tasks/image_to_image).
 
 <InferenceSnippet
 pipeline=image-to-image
-providersMapping={ {"replicate":{"modelId":"black-forest-labs/FLUX.1-Kontext-dev","providerModelId":"black-forest-labs/flux-kontext-dev"} } }
+providersMapping={ {"replicate":{"modelId":"Qwen/Qwen-Image-Edit","providerModelId":"qwen/qwen-image-edit"} } }
 />
 
 
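A corresponding client call for the new Replicate example would look roughly like this (a sketch assuming `huggingface_hub`; the image path, prompt, and `HF_TOKEN` are placeholders):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="replicate", api_key=os.environ["HF_TOKEN"])

# Edit a local image using the Hub modelId; the provider-side id (qwen/qwen-image-edit) is resolved automatically.
edited = client.image_to_image(
    "cat.png",                        # placeholder input image path
    prompt="Turn the cat into a tiger.",
    model="Qwen/Qwen-Image-Edit",
)
edited.save("cat_edited.png")         # returns a PIL.Image.Image
```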

docs/inference-providers/tasks/chat-completion.md

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ The API supports:
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"cerebras":{"modelId":"zai-org/GLM-4.6","providerModelId":"zai-glm-4.6"},"cohere":{"modelId":"CohereLabs/aya-expanse-32b","providerModelId":"c4ai-aya-expanse-32b"},"featherless-ai":{"modelId":"inclusionAI/Ling-1T","providerModelId":"inclusionAI/Ling-1T"},"fireworks-ai":{"modelId":"MiniMaxAI/MiniMax-M2","providerModelId":"accounts/fireworks/models/minimax-m2"},"groq":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"hf-inference":{"modelId":"katanemo/Arch-Router-1.5B","providerModelId":"katanemo/Arch-Router-1.5B"},"hyperbolic":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"nebius":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"novita":{"modelId":"MiniMaxAI/MiniMax-M2","providerModelId":"minimax/minimax-m2"},"nscale":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"publicai":{"modelId":"swiss-ai/Apertus-8B-Instruct-2509","providerModelId":"swiss-ai/apertus-8b-instruct"},"sambanova":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"Meta-Llama-3.1-8B-Instruct"},"scaleway":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"llama-3.1-8b-instruct"},"together":{"modelId":"moonshotai/Kimi-K2-Thinking","providerModelId":"moonshotai/Kimi-K2-Thinking"},"zai-org":{"modelId":"zai-org/GLM-4.6","providerModelId":"glm-4.6"}} }
+providersMapping={ {"cerebras":{"modelId":"zai-org/GLM-4.6","providerModelId":"zai-glm-4.6"},"cohere":{"modelId":"CohereLabs/aya-expanse-32b","providerModelId":"c4ai-aya-expanse-32b"},"featherless-ai":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Meta-Llama-3.1-8B-Instruct"},"fireworks-ai":{"modelId":"openai/gpt-oss-20b","providerModelId":"accounts/fireworks/models/gpt-oss-20b"},"groq":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"hf-inference":{"modelId":"katanemo/Arch-Router-1.5B","providerModelId":"katanemo/Arch-Router-1.5B"},"hyperbolic":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"nebius":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"novita":{"modelId":"MiniMaxAI/MiniMax-M2","providerModelId":"minimax/minimax-m2"},"nscale":{"modelId":"openai/gpt-oss-20b","providerModelId":"openai/gpt-oss-20b"},"publicai":{"modelId":"swiss-ai/Apertus-8B-Instruct-2509","providerModelId":"swiss-ai/apertus-8b-instruct"},"sambanova":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"Meta-Llama-3.1-8B-Instruct"},"scaleway":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"llama-3.1-8b-instruct"},"together":{"modelId":"moonshotai/Kimi-K2-Thinking","providerModelId":"moonshotai/Kimi-K2-Thinking"},"zai-org":{"modelId":"zai-org/GLM-4.6","providerModelId":"glm-4.6"}} }
 conversational />
 
 
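Since the task page aggregates one example model per provider, the generated snippets all reduce to the same client call with a different `provider` argument. A rough sketch, using a few provider/model pairs taken from the mapping above (loop, prompt, and `HF_TOKEN` are illustrative assumptions):

```python
import os
from huggingface_hub import InferenceClient

# Same call shape for every provider in the mapping; only `provider` and `model` change.
for provider, model in [
    ("groq", "openai/gpt-oss-20b"),
    ("cerebras", "zai-org/GLM-4.6"),
    ("sambanova", "meta-llama/Llama-3.1-8B-Instruct"),
]:
    client = InferenceClient(provider=provider, api_key=os.environ["HF_TOKEN"])
    out = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(provider, "->", out.choices[0].message.content)
```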

docs/inference-providers/tasks/feature-extraction.md

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ Explore all available models and find the one that suits you best [here](https:/
 | **normalize** | _boolean_ | |
 | **prompt_name** | _string_ | The name of the prompt that should be used by for encoding. If not set, no prompt will be applied. Must be a key in the `sentence-transformers` configuration `prompts` dictionary. For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ", ...}, then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" because the prompt text will be prepended before any text to encode. |
 | **truncate** | _boolean_ | |
-| **truncation_direction** | _enum_ | Possible values: Left, Right. |
+| **truncation_direction** | _enum_ | Possible values: left, right. |
 
 
 #### Response
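In client code this parameter would be passed along with the other embedding options; a rough sketch (assuming `huggingface_hub` exposes `truncation_direction` on `feature_extraction` and accepts the lowercase values documented in the table above; the model id is only an example, not from this commit):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference", api_key=os.environ["HF_TOKEN"])

# Embed a sentence; `truncation_direction` uses the lowercase enum values ("left" / "right").
embedding = client.feature_extraction(
    "What is the capital of France?",
    model="sentence-transformers/all-MiniLM-L6-v2",  # example model
    normalize=True,
    truncate=True,
    truncation_direction="left",
)
print(embedding.shape)  # numpy array; shape depends on the model/deployment
```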

docs/inference-providers/tasks/image-to-image.md

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ Explore all available models and find the one that suits you best [here](https:/
 
 <InferenceSnippet
 pipeline=image-to-image
-providersMapping={ {"fal-ai":{"modelId":"nvidia/ChronoEdit-14B-Diffusers","providerModelId":"fal-ai/chrono-edit"},"replicate":{"modelId":"black-forest-labs/FLUX.1-Kontext-dev","providerModelId":"black-forest-labs/flux-kontext-dev"},"wavespeed":{"modelId":"Qwen/Qwen-Image-Edit","providerModelId":"wavespeed-ai/qwen-image/edit"}} }
+providersMapping={ {"fal-ai":{"modelId":"nvidia/ChronoEdit-14B-Diffusers","providerModelId":"fal-ai/chrono-edit"},"replicate":{"modelId":"Qwen/Qwen-Image-Edit","providerModelId":"qwen/qwen-image-edit"},"wavespeed":{"modelId":"Qwen/Qwen-Image-Edit","providerModelId":"wavespeed-ai/qwen-image/edit"}} }
 />
 
 
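The other providers in this mapping are called the same way; for instance, a sketch for the fal-ai entry (image path, prompt, and `HF_TOKEN` are placeholders):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="fal-ai", api_key=os.environ["HF_TOKEN"])

# Same image_to_image call as the Replicate example above; only provider and model change.
result = client.image_to_image(
    "scene.png",                              # placeholder input image
    prompt="Make it look like a rainy night.",
    model="nvidia/ChronoEdit-14B-Diffusers",  # routed to fal-ai/chrono-edit on the provider side
)
result.save("scene_edited.png")
```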

docs/inference-providers/tasks/text-generation.md

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ Explore all available models and find the one that suits you best [here](https:/
 
 <InferenceSnippet
 pipeline=text-generation
-providersMapping={ {"featherless-ai":{"modelId":"inclusionAI/Ling-1T","providerModelId":"inclusionAI/Ling-1T"},"hf-inference":{"modelId":"katanemo/Arch-Router-1.5B","providerModelId":"katanemo/Arch-Router-1.5B"}} }
+providersMapping={ {"featherless-ai":{"modelId":"meta-llama/Llama-3.1-8B-Instruct","providerModelId":"meta-llama/Meta-Llama-3.1-8B-Instruct"},"hf-inference":{"modelId":"katanemo/Arch-Router-1.5B","providerModelId":"katanemo/Arch-Router-1.5B"}} }
 />
 
 
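Unlike the conversational snippets above, this page documents raw completion; a rough equivalent call (sketch only, with prompt and `HF_TOKEN` as assumptions):

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="featherless-ai", api_key=os.environ["HF_TOKEN"])

# Raw (non-chat) text generation, matching the pipeline=text-generation snippet.
output = client.text_generation(
    "Once upon a time,",
    model="meta-llama/Llama-3.1-8B-Instruct",
    max_new_tokens=64,
)
print(output)
```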

scripts/inference-providers/package.json

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 "author": "",
 "license": "ISC",
 "dependencies": {
-"@huggingface/inference": "^4.13.2",
+"@huggingface/inference": "^4.13.3",
 "@huggingface/tasks": "^0.19.63",
 "@types/node": "^22.5.0",
 "handlebars": "^4.7.8",

scripts/inference-providers/pnpm-lock.yaml

Lines changed: 5 additions & 5 deletions
(Generated lockfile; diff not rendered.)
