diff --git a/docs/api-inference/tasks/chat-completion.md b/docs/api-inference/tasks/chat-completion.md index ef9a66fa9..e4aac572d 100644 --- a/docs/api-inference/tasks/chat-completion.md +++ b/docs/api-inference/tasks/chat-completion.md @@ -25,13 +25,12 @@ This is a subtask of [`text-generation`](https://huggingface.co/docs/api-inferen - [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B): Smaller variant of one of the most powerful models. - [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct): Very powerful text generation model trained to follow instructions. - [microsoft/phi-4](https://huggingface.co/microsoft/phi-4): Powerful text generation model by Microsoft. -- [PowerInfer/SmallThinker-3B-Preview](https://huggingface.co/PowerInfer/SmallThinker-3B-Preview): A very powerful model with reasoning capabilities. - [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct): Text generation model used to write code. - [deepseek-ai/DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1): Powerful reasoning based open large language model. #### Conversational Vision-Language Models (VLMs) -- [Qwen/QVQ-72B-Preview](https://huggingface.co/Qwen/QVQ-72B-Preview): Image-text-to-text model with reasoning capabilities. +- [Qwen/Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): Strong image-text-to-text model. 
### API Playground @@ -214,11 +213,11 @@ To use the JavaScript client, see `huggingface.js`'s [package reference](https:/ ```bash -curl 'https://router.huggingface.co/hf-inference/models/Qwen/QVQ-72B-Preview/v1/chat/completions' \ +curl 'https://router.huggingface.co/hf-inference/models/Qwen/Qwen2.5-VL-7B-Instruct/v1/chat/completions' \ -H 'Authorization: Bearer hf_***' \ -H 'Content-Type: application/json' \ --data '{ - "model": "Qwen/QVQ-72B-Preview", + "model": "Qwen/Qwen2.5-VL-7B-Instruct", "messages": [ { "role": "user", @@ -271,7 +270,7 @@ messages = [ ] stream = client.chat.completions.create( - model="Qwen/QVQ-72B-Preview", + model="Qwen/Qwen2.5-VL-7B-Instruct", messages=messages, max_tokens=500, stream=True @@ -309,7 +308,7 @@ messages = [ ] stream = client.chat.completions.create( - model="Qwen/QVQ-72B-Preview", + model="Qwen/Qwen2.5-VL-7B-Instruct", messages=messages, max_tokens=500, stream=True @@ -332,7 +331,7 @@ const client = new HfInference("hf_***"); let out = ""; const stream = client.chatCompletionStream({ - model: "Qwen/QVQ-72B-Preview", + model: "Qwen/Qwen2.5-VL-7B-Instruct", messages: [ { role: "user", @@ -375,7 +374,7 @@ const client = new OpenAI({ let out = ""; const stream = await client.chat.completions.create({ - model: "Qwen/QVQ-72B-Preview", + model: "Qwen/Qwen2.5-VL-7B-Instruct", messages: [ { role: "user", @@ -458,7 +457,7 @@ To use the JavaScript client, see `huggingface.js`'s [package reference](https:/ | **stop** | _string[]_ | Up to 4 sequences where the API will stop generating further tokens. | | **stream** | _boolean_ | | | **stream_options** | _object_ | | -| **        include_usage*** | _boolean_ | If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value. 
| +| **        include_usage** | _boolean_ | If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value. | | **temperature** | _number_ | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | **tool_choice** | _unknown_ | One of the following: | | **         (#1)** | _enum_ | Possible values: auto. | @@ -542,7 +541,7 @@ For more information about streaming, check out [this guide](https://huggingface | **                        tool_call_id** | _string_ | | | **                 (#2)** | _object_ | | | **                        role** | _string_ | | -| **                        tool_calls** | _object_ | | +| **                        tool_calls** | _object[]_ | | | **                                function** | _object_ | | | **                                        arguments** | _string_ | | | **                                        name** | _string_ | | diff --git a/docs/api-inference/tasks/image-classification.md b/docs/api-inference/tasks/image-classification.md index b65bb686a..0a0eb78c8 100644 --- a/docs/api-inference/tasks/image-classification.md +++ b/docs/api-inference/tasks/image-classification.md @@ -26,6 +26,7 @@ For more details about the `image-classification` task, check out its [dedicated - [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224): A strong image classification model. - [facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224): A robust image classification model. 
+- [facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224): A strong image classification model. Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-classification&sort=trending). diff --git a/docs/api-inference/tasks/image-text-to-text.md b/docs/api-inference/tasks/image-text-to-text.md index ae4f7e7d9..14903fb01 100644 --- a/docs/api-inference/tasks/image-text-to-text.md +++ b/docs/api-inference/tasks/image-text-to-text.md @@ -24,7 +24,7 @@ For more details about the `image-text-to-text` task, check out its [dedicated p ### Recommended models -- [Qwen/QVQ-72B-Preview](https://huggingface.co/Qwen/QVQ-72B-Preview): Image-text-to-text model with reasoning capabilities. +- [Qwen/Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): Strong image-text-to-text model. Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-text-to-text&sort=trending). 
@@ -35,7 +35,7 @@ Explore all available models and find the one that suits you best [here](https:/ ```bash -curl https://router.huggingface.co/hf-inference/models/Qwen/QVQ-72B-Preview \ +curl https://router.huggingface.co/hf-inference/models/Qwen/Qwen2.5-VL-7B-Instruct \ -X POST \ -d '{"inputs": "Can you please let us know more details about your "}' \ -H 'Content-Type: application/json' \ @@ -56,7 +56,7 @@ client = InferenceClient( messages = "\"Can you please let us know more details about your \"" stream = client.chat.completions.create( - model="Qwen/QVQ-72B-Preview", + model="Qwen/Qwen2.5-VL-7B-Instruct", messages=messages, max_tokens=500, stream=True @@ -78,7 +78,7 @@ client = OpenAI( messages = "\"Can you please let us know more details about your \"" stream = client.chat.completions.create( - model="Qwen/QVQ-72B-Preview", + model="Qwen/Qwen2.5-VL-7B-Instruct", messages=messages, max_tokens=500, stream=True @@ -95,7 +95,7 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu ```js async function query(data) { const response = await fetch( - "https://router.huggingface.co/hf-inference/models/Qwen/QVQ-72B-Preview", + "https://router.huggingface.co/hf-inference/models/Qwen/Qwen2.5-VL-7B-Instruct", { headers: { Authorization: "Bearer hf_***", diff --git a/docs/api-inference/tasks/text-generation.md b/docs/api-inference/tasks/text-generation.md index 5b6e12513..7cedd2c76 100644 --- a/docs/api-inference/tasks/text-generation.md +++ b/docs/api-inference/tasks/text-generation.md @@ -16,7 +16,7 @@ For more details, check out: Generate text based on a prompt. -If you are interested in a Chat Completion task, which generates a response based on a list of messages, check out the [`chat-completion`](./chat-completion) task. +If you are interested in a Chat Completion task, which generates a response based on a list of messages, check out the [`chat-completion`](./chat-completion) task. 
@@ -30,7 +30,6 @@ For more details about the `text-generation` task, check out its [dedicated page - [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B): Smaller variant of one of the most powerful models. - [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct): Very powerful text generation model trained to follow instructions. - [microsoft/phi-4](https://huggingface.co/microsoft/phi-4): Powerful text generation model by Microsoft. -- [PowerInfer/SmallThinker-3B-Preview](https://huggingface.co/PowerInfer/SmallThinker-3B-Preview): A very powerful model with reasoning capabilities. - [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct): Text generation model used to write code. - [deepseek-ai/DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1): Powerful reasoning based open large language model. diff --git a/scripts/api-inference/package.json b/scripts/api-inference/package.json index db8c3036e..49cd7a61f 100644 --- a/scripts/api-inference/package.json +++ b/scripts/api-inference/package.json @@ -15,7 +15,7 @@ "license": "ISC", "dependencies": { "@huggingface/inference": "^3.5.0", - "@huggingface/tasks": "^0.17.0", + "@huggingface/tasks": "^0.17.4", "@types/node": "^22.5.0", "handlebars": "^4.7.8", "node": "^20.17.0", diff --git a/scripts/api-inference/pnpm-lock.yaml b/scripts/api-inference/pnpm-lock.yaml index 59e335c81..46a86a1bd 100644 --- a/scripts/api-inference/pnpm-lock.yaml +++ b/scripts/api-inference/pnpm-lock.yaml @@ -12,8 +12,8 @@ importers: specifier: ^3.5.0 version: 3.5.0 '@huggingface/tasks': - specifier: ^0.17.0 - version: 0.17.0 + specifier: ^0.17.4 + version: 0.17.4 '@types/node': specifier: ^22.5.0 version: 22.5.0 @@ -193,8 +193,8 @@ packages: resolution: {integrity: sha512-5IKkI/HJDDWg5aVWyd60kj27L9Kwxyyvu64U1To4/HzsZj13flqv2rJMrT6OB0izvFwTfUN1SDrrA5OH3YbxQQ==} engines: {node: '>=18'} - 
'@huggingface/tasks@0.17.0': - resolution: {integrity: sha512-tRF2gBBgt71VGTZa5Per7HzDWkQ+gzR6Ay2X2i0Cf9FJTMoHBIK0JJQ3W4wL1g4ebdDWiDppganqz8Upy3Hy4A==} + '@huggingface/tasks@0.17.4': + resolution: {integrity: sha512-LES7+OosthFKdqRL0e+bA2d4jfKmiQWuqahsPrv0+EsSZtdHdaZ3nje0f2g5wq4miHX4xWpBLuWJknjdnBwXsA==} '@jridgewell/resolve-uri@3.1.2': resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} @@ -413,9 +413,9 @@ snapshots: '@huggingface/inference@3.5.0': dependencies: - '@huggingface/tasks': 0.17.0 + '@huggingface/tasks': 0.17.4 - '@huggingface/tasks@0.17.0': {} + '@huggingface/tasks@0.17.4': {} '@jridgewell/resolve-uri@3.1.2': {}