From 7017fdd378e5155a92fc8de11d78c1f003aacc5f Mon Sep 17 00:00:00 2001
From: Wauplin <11801849+Wauplin@users.noreply.github.com>
Date: Fri, 16 May 2025 03:26:06 +0000
Subject: [PATCH] Update Inference Providers documentation (automated)

---
 .../inference-providers/providers/cerebras.md |  2 +-
 .../providers/hf-inference.md                 | 12 +-------
 docs/inference-providers/providers/nebius.md  | 10 +++++++
 docs/inference-providers/providers/nscale.md  | 30 +++++++++++++++++++
 .../providers/replicate.md                    |  2 +-
 .../providers/sambanova.md                    |  2 +-
 .../tasks/audio-classification.md             |  6 +---
 .../tasks/chat-completion.md                  |  5 ++--
 .../tasks/feature-extraction.md               |  2 +-
 .../tasks/image-classification.md             |  1 +
 .../tasks/image-segmentation.md               |  1 +
 .../tasks/summarization.md                    |  1 +
 .../tasks/text-classification.md              |  1 -
 .../tasks/text-generation.md                  |  1 +
 .../tasks/text-to-image.md                    |  2 +-
 .../tasks/text-to-video.md                    |  2 +-
 scripts/inference-providers/package.json      |  4 +--
 scripts/inference-providers/pnpm-lock.yaml    | 30 +++++++++----------
 18 files changed, 72 insertions(+), 42 deletions(-)

diff --git a/docs/inference-providers/providers/cerebras.md b/docs/inference-providers/providers/cerebras.md
index 43b736c69..940cb0a0b 100644
--- a/docs/inference-providers/providers/cerebras.md
+++ b/docs/inference-providers/providers/cerebras.md
@@ -44,7 +44,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/providers/hf-inference.md b/docs/inference-providers/providers/hf-inference.md
index d1cc6dc4b..4d253c83f 100644
--- a/docs/inference-providers/providers/hf-inference.md
+++ b/docs/inference-providers/providers/hf-inference.md
@@ -39,16 +39,6 @@ If you are interested in deploying models to a dedicated and autoscaling infrast
 
 ## Supported tasks
 
-### Audio Classification
-
-Find out more about Audio Classification [here](../tasks/audio_classification).
-
-
-
 ### Automatic Speech Recognition
 
 Find out more about Automatic Speech Recognition [here](../tasks/automatic_speech_recognition).
@@ -75,7 +65,7 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/providers/nebius.md b/docs/inference-providers/providers/nebius.md
index 53db03a1b..dbdfc662c 100644
--- a/docs/inference-providers/providers/nebius.md
+++ b/docs/inference-providers/providers/nebius.md
@@ -58,6 +58,16 @@ Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
 conversational />
 
+### Feature Extraction
+
+Find out more about Feature Extraction [here](../tasks/feature_extraction).
+
+
+
 ### Text To Image
 
 Find out more about Text To Image [here](../tasks/text_to_image).
diff --git a/docs/inference-providers/providers/nscale.md b/docs/inference-providers/providers/nscale.md
index 228abfd24..5da7783ec 100644
--- a/docs/inference-providers/providers/nscale.md
+++ b/docs/inference-providers/providers/nscale.md
@@ -39,3 +39,33 @@ Built on this foundation, Nscale's inference service empowers developers with a
 
 ## Supported tasks
 
+
+### Chat Completion (LLM)
+
+Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
+
+
+
+### Chat Completion (VLM)
+
+Find out more about Chat Completion (VLM) [here](../tasks/chat-completion).
+
+
+
+### Text To Image
+
+Find out more about Text To Image [here](../tasks/text_to_image).
+
+
+
diff --git a/docs/inference-providers/providers/replicate.md b/docs/inference-providers/providers/replicate.md
index 657cee3f5..c1cd26b93 100644
--- a/docs/inference-providers/providers/replicate.md
+++ b/docs/inference-providers/providers/replicate.md
@@ -54,6 +54,6 @@ Find out more about Text To Video [here](../tasks/text_to_video).
diff --git a/docs/inference-providers/providers/sambanova.md b/docs/inference-providers/providers/sambanova.md
index 53eb87486..ced28a690 100644
--- a/docs/inference-providers/providers/sambanova.md
+++ b/docs/inference-providers/providers/sambanova.md
@@ -45,7 +45,7 @@ Find out more about Chat Completion (LLM) [here](../tasks/chat-completion).
diff --git a/docs/inference-providers/tasks/audio-classification.md b/docs/inference-providers/tasks/audio-classification.md
index a066b7be7..fcc01910f 100644
--- a/docs/inference-providers/tasks/audio-classification.md
+++ b/docs/inference-providers/tasks/audio-classification.md
@@ -29,17 +29,13 @@ For more details about the `audio-classification` task, check out its [dedicated
 
 ### Recommended models
 
-- [ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition](https://huggingface.co/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition): An emotion recognition model.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=audio-classification&sort=trending).
 
 ### Using the API
 
-
+There are currently no snippet examples for the **audio-classification** task, as no providers support it yet.
diff --git a/docs/inference-providers/tasks/chat-completion.md b/docs/inference-providers/tasks/chat-completion.md
index ded878c14..69967cbad 100644
--- a/docs/inference-providers/tasks/chat-completion.md
+++ b/docs/inference-providers/tasks/chat-completion.md
@@ -22,6 +22,7 @@ This is a subtask of [`text-generation`](https://huggingface.co/docs/inference-p
 #### Conversational Large Language Models (LLMs)
 
 - [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it): A text-generation model trained to follow instructions.
+- [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B): Smaller variant of one of the most powerful models.
 - [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct): Very powerful text generation model trained to follow instructions.
 - [microsoft/phi-4](https://huggingface.co/microsoft/phi-4): Powerful text generation model by Microsoft.
 - [Qwen/Qwen2.5-7B-Instruct-1M](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-1M): Strong conversational model that supports very long instructions.
@@ -60,7 +61,7 @@ The API supports:
@@ -70,7 +71,7 @@ conversational />
diff --git a/docs/inference-providers/tasks/feature-extraction.md b/docs/inference-providers/tasks/feature-extraction.md
index 3d9e60fd0..9fb6c2283 100644
--- a/docs/inference-providers/tasks/feature-extraction.md
+++ b/docs/inference-providers/tasks/feature-extraction.md
@@ -38,7 +38,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/image-classification.md b/docs/inference-providers/tasks/image-classification.md
index 6c3864f8d..cc68b01fd 100644
--- a/docs/inference-providers/tasks/image-classification.md
+++ b/docs/inference-providers/tasks/image-classification.md
@@ -26,6 +26,7 @@ For more details about the `image-classification` task, check out its [dedicated
 
 - [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224): A strong image classification model.
 - [facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224): A robust image classification model.
+- [facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224): A strong image classification model.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-classification&sort=trending).
diff --git a/docs/inference-providers/tasks/image-segmentation.md b/docs/inference-providers/tasks/image-segmentation.md
index 1ceca0e68..197800740 100644
--- a/docs/inference-providers/tasks/image-segmentation.md
+++ b/docs/inference-providers/tasks/image-segmentation.md
@@ -24,6 +24,7 @@ For more details about the `image-segmentation` task, check out its [dedicated p
 
 ### Recommended models
 
+- [openmmlab/upernet-convnext-small](https://huggingface.co/openmmlab/upernet-convnext-small): Solid semantic segmentation model trained on ADE20k.
 - [facebook/mask2former-swin-large-coco-panoptic](https://huggingface.co/facebook/mask2former-swin-large-coco-panoptic): Panoptic segmentation model trained on the COCO (common objects) dataset.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-segmentation&sort=trending).
diff --git a/docs/inference-providers/tasks/summarization.md b/docs/inference-providers/tasks/summarization.md
index 6e0ff5ead..6d3994406 100644
--- a/docs/inference-providers/tasks/summarization.md
+++ b/docs/inference-providers/tasks/summarization.md
@@ -25,6 +25,7 @@ For more details about the `summarization` task, check out its [dedicated page](
 ### Recommended models
 
 - [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn): A strong summarization model trained on English news articles. Excels at generating factual summaries.
+- [Falconsai/medical_summarization](https://huggingface.co/Falconsai/medical_summarization): A summarization model trained on medical articles.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=summarization&sort=trending).
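For orientation while reviewing the chat-completion task page touched above, here is a minimal sketch of a provider-routed call with `@huggingface/inference`, the client these docs are generated against. The token, `provider`, and `model` values are illustrative assumptions, not taken from this patch:

```ts
import { InferenceClient } from "@huggingface/inference";

// Hypothetical setup: expects a valid token in the HF_TOKEN environment variable.
const client = new InferenceClient(process.env.HF_TOKEN);

// Route a chat-completion request through one of the providers listed on the model page.
const out = await client.chatCompletion({
  provider: "nebius", // placeholder provider
  model: "meta-llama/Meta-Llama-3.1-8B-Instruct", // one of the recommended models above
  messages: [{ role: "user", content: "In one sentence, what is an inference provider?" }],
  max_tokens: 128,
});

console.log(out.choices[0].message.content);
```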
diff --git a/docs/inference-providers/tasks/text-classification.md b/docs/inference-providers/tasks/text-classification.md
index 4d9d27f7c..48934e758 100644
--- a/docs/inference-providers/tasks/text-classification.md
+++ b/docs/inference-providers/tasks/text-classification.md
@@ -26,7 +26,6 @@ For more details about the `text-classification` task, check out its [dedicated
 
 - [distilbert/distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english): A robust model trained for sentiment analysis.
 - [ProsusAI/finbert](https://huggingface.co/ProsusAI/finbert): A sentiment analysis model specialized in financial sentiment.
-- [meta-llama/Prompt-Guard-86M](https://huggingface.co/meta-llama/Prompt-Guard-86M): A model that can classify text generation attacks.
 
 Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-classification&sort=trending).
diff --git a/docs/inference-providers/tasks/text-generation.md b/docs/inference-providers/tasks/text-generation.md
index 0b1298517..22eabb509 100644
--- a/docs/inference-providers/tasks/text-generation.md
+++ b/docs/inference-providers/tasks/text-generation.md
@@ -27,6 +27,7 @@ For more details about the `text-generation` task, check out its [dedicated page
 ### Recommended models
 
 - [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it): A text-generation model trained to follow instructions.
+- [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B): Smaller variant of one of the most powerful models.
 - [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct): Very powerful text generation model trained to follow instructions.
 - [microsoft/phi-4](https://huggingface.co/microsoft/phi-4): Powerful text generation model by Microsoft.
 - [Qwen/Qwen2.5-7B-Instruct-1M](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-1M): Strong conversational model that supports very long instructions.
diff --git a/docs/inference-providers/tasks/text-to-image.md b/docs/inference-providers/tasks/text-to-image.md
index f00698262..86ebefabe 100644
--- a/docs/inference-providers/tasks/text-to-image.md
+++ b/docs/inference-providers/tasks/text-to-image.md
@@ -36,7 +36,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/docs/inference-providers/tasks/text-to-video.md b/docs/inference-providers/tasks/text-to-video.md
index 18411dc97..8113ba25a 100644
--- a/docs/inference-providers/tasks/text-to-video.md
+++ b/docs/inference-providers/tasks/text-to-video.md
@@ -35,7 +35,7 @@ Explore all available models and find the one that suits you best [here](https:/
diff --git a/scripts/inference-providers/package.json b/scripts/inference-providers/package.json
index 5f35711b9..ee8f99c8f 100644
--- a/scripts/inference-providers/package.json
+++ b/scripts/inference-providers/package.json
@@ -14,8 +14,8 @@
   "author": "",
   "license": "ISC",
   "dependencies": {
-    "@huggingface/inference": "^3.11.0",
-    "@huggingface/tasks": "^0.19.1",
+    "@huggingface/inference": "^3.13.1",
+    "@huggingface/tasks": "^0.19.5",
     "@types/node": "^22.5.0",
     "handlebars": "^4.7.8",
     "node": "^20.17.0",
diff --git a/scripts/inference-providers/pnpm-lock.yaml b/scripts/inference-providers/pnpm-lock.yaml
index ddc355e00..e91415f5e 100644
--- a/scripts/inference-providers/pnpm-lock.yaml
+++ b/scripts/inference-providers/pnpm-lock.yaml
@@ -9,11 +9,11 @@ importers:
   .:
     dependencies:
       '@huggingface/inference':
-        specifier: ^3.11.0
-        version: 3.11.0
+        specifier: ^3.13.1
+        version: 3.13.1
       '@huggingface/tasks':
-        specifier: ^0.19.1
-        version: 0.19.1
+        specifier: ^0.19.5
+        version: 0.19.5
       '@types/node':
         specifier: ^22.5.0
        version: 22.5.0
@@ -189,16 +189,16 @@ packages:
     cpu: [x64]
     os: [win32]
 
-  '@huggingface/inference@3.11.0':
-    resolution: {integrity: sha512-4/qwTDYoVJLzoWN+rKk/8RCcEsqWFBRgzVr6wfrW0gEojzmsx4cgIXhEnPFG4s98OfjXuBqG7P0glLGqItrAaw==}
+  '@huggingface/inference@3.13.1':
+    resolution: {integrity: sha512-RIFaC295cL/0tco+HboS8abeApfn01/tWFgjYwLo6vRYGkuQBSzwgHuAdouOZu62UQYwrWA4Eyv1e+4zBxc+dQ==}
     engines: {node: '>=18'}
 
-  '@huggingface/jinja@0.4.1':
-    resolution: {integrity: sha512-3WXbMFaPkk03LRCM0z0sylmn8ddDm4ubjU7X+Hg4M2GOuMklwoGAFXp9V2keq7vltoB/c7McE5aHUVVddAewsw==}
+  '@huggingface/jinja@0.5.0':
+    resolution: {integrity: sha512-Ptc03/jGRiYRoi0bUYKZ14MkDslsBRT24oxmsvUlfYrvQMldrxCevhPnT+hfX8awKTT8/f/0ZBBWldoeAcMHdQ==}
     engines: {node: '>=18'}
 
-  '@huggingface/tasks@0.19.1':
-    resolution: {integrity: sha512-cocq5+jkmh8+qIvWDCyC9nkf7qCpmBRrJL/WPWhfIMXWEv2dMYMY62ylFv+dq/vRwpe+/5h0WlnZGOSIa+3OJg==}
+  '@huggingface/tasks@0.19.5':
+    resolution: {integrity: sha512-WEwM/tZsRqw0TinTkQwqPsK5pdaGffAV6Nu6xxSTSZUsBvkDJxE9kTiMNSwvjeHvt9/MYAJKjZ+CMSo6Rugs4g==}
 
   '@jridgewell/resolve-uri@3.1.2':
     resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
@@ -415,14 +415,14 @@ snapshots:
   '@esbuild/win32-x64@0.23.1':
     optional: true
 
-  '@huggingface/inference@3.11.0':
+  '@huggingface/inference@3.13.1':
     dependencies:
-      '@huggingface/jinja': 0.4.1
-      '@huggingface/tasks': 0.19.1
+      '@huggingface/jinja': 0.5.0
+      '@huggingface/tasks': 0.19.5
 
-  '@huggingface/jinja@0.4.1': {}
+  '@huggingface/jinja@0.5.0': {}
 
-  '@huggingface/tasks@0.19.1': {}
+  '@huggingface/tasks@0.19.5': {}
 
   '@jridgewell/resolve-uri@3.1.2': {}
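The patch also bumps `@huggingface/inference` to ^3.13.1 for the snippet-generation scripts. As a rough illustration of the text-to-image task documented above, a sketch using that client; the provider, model, and output handling are assumptions rather than part of this patch:

```ts
import { writeFile } from "node:fs/promises";
import { InferenceClient } from "@huggingface/inference";

// Hypothetical setup: expects a valid token in the HF_TOKEN environment variable.
const client = new InferenceClient(process.env.HF_TOKEN);

// textToImage returns the generated image as a Blob in recent client releases.
const image = await client.textToImage({
  provider: "nscale", // placeholder: any provider that serves the chosen model
  model: "black-forest-labs/FLUX.1-schnell", // placeholder text-to-image model
  inputs: "An astronaut riding a horse, photorealistic",
});

// Persist the image bytes to disk.
await writeFile("out.png", Buffer.from(await image.arrayBuffer()));
```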