diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index 5a8ba133c8..2dd4f208f3 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -4765,30 +4765,15 @@ "visibility": "public" }, "stack": { -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= -======= - "since": "8.14.0", -======= "since": "8.16.0", ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "stability": "stable", "visibility": "public" } }, -<<<<<<< HEAD - "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", - "docId": "inference-api-put-azureopenai", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-azure-openai.html", - "name": "inference.put_azureopenai", -======= "description": "Create an AlibabaCloud AI Search inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-alibabacloud", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-alibabacloud-ai-search.html", "name": "inference.put_alibabacloud", ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "privileges": { "cluster": [ "manage_inference" @@ -4796,11 +4781,7 @@ }, "request": { "name": "Request", -<<<<<<< HEAD - "namespace": "inference.put_azureopenai" -======= "namespace": "inference.put_alibabacloud" ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) }, "requestBodyRequired": 
false, "requestMediaType": [ @@ -4808,11 +4789,7 @@ ], "response": { "name": "Response", -<<<<<<< HEAD - "namespace": "inference.put_azureopenai" -======= "namespace": "inference.put_alibabacloud" ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) }, "responseMediaType": [ "application/json" @@ -4822,11 +4799,7 @@ "methods": [ "PUT" ], -<<<<<<< HEAD - "path": "/_inference/{task_type}/{azureopenai_inference_id}" -======= "path": "/_inference/{task_type}/{alibabacloud_inference_id}" ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) } ] }, @@ -4837,10 +4810,6 @@ "visibility": "public" }, "stack": { -<<<<<<< HEAD ->>>>>>> d5b1a529a (Add Azure OpenAI inference details (#4019)) -======= ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "since": "8.12.0", "stability": "stable", "visibility": "public" @@ -4848,7 +4817,7 @@ }, "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-amazonbedrock", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-amazon-bedrock.html", "name": "inference.put_amazonbedrock", "privileges": { "cluster": [ @@ -4893,7 +4862,7 @@ }, "description": "Create an Anthropic inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `anthropic` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-anthropic", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-anthropic.html", "name": "inference.put_anthropic", "privileges": { "cluster": [ @@ -4938,7 +4907,7 @@ }, "description": "Create an Azure AI studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the 
`azureaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-azureaistudio", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-azure-ai-studio.html", "name": "inference.put_azureaistudio", "privileges": { "cluster": [ @@ -4976,8 +4945,6 @@ "visibility": "public" }, "stack": { -<<<<<<< HEAD -======= "since": "8.14.0", "stability": "stable", "visibility": "public" @@ -4985,7 +4952,7 @@ }, "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment includes:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-azureopenai", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-azure-openai.html", "name": "inference.put_azureopenai", "privileges": { "cluster": [ @@ -5023,7 +4990,6 @@ "visibility": "public" }, "stack": { ->>>>>>> b052219ca (Update doc_id URLs for inference APIs (#4127)) "since": "8.13.0", "stability": "stable", "visibility": "public" @@ -5031,11 +4997,7 @@ }, "description": "Create a Cohere inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `cohere` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\":
\"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-cohere", -<<<<<<< HEAD - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/branch/infer-service-cohere.html", -======= - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere", ->>>>>>> b052219ca (Update doc_id URLs for inference APIs (#4127)) + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-cohere.html", "name": "inference.put_cohere", "privileges": { "cluster": [ @@ -5073,7 +5035,6 @@ "visibility": "public" }, "stack": { ->>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) "since": "8.12.0", "stability": "stable", "visibility": "public" @@ -5081,11 +5042,7 @@ }, "description": "Create an Elastic Inference Service (EIS) inference endpoint.\n\nCreate an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).", "docId": "inference-api-put-eis", -<<<<<<< HEAD - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-elastic.html", -======= - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-eis", ->>>>>>> b052219ca (Update doc_id URLs for inference APIs (#4127)) + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-elastic.html", "name": "inference.put_eis", "privileges": { "cluster": [ @@ -5123,8 +5080,6 @@ "visibility": "public" }, "stack": { -<<<<<<< HEAD -======= "since": "8.13.0", "stability": "stable", "visibility": "public" @@ -5132,7 +5087,7 @@ }, "description": "Create an Elasticsearch inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. 
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-elasticsearch", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-elasticsearch.html", "name": "inference.put_elasticsearch", "privileges": { "cluster": [ @@ -5181,7 +5136,7 @@ }, "description": "Create an ELSER inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-elser", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-elser.html", "name": "inference.put_elser", "privileges": { "cluster": [ @@ -5226,7 +5181,7 @@ }, "description": "Create a Google AI Studio inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googleaistudio` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-googleaistudio", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-google-ai-studio.html", "name": "inference.put_googleaistudio", "privileges": { "cluster": [ @@
-5271,7 +5226,7 @@ }, "description": "Create a Google Vertex AI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `googlevertexai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-googlevertexai", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-google-vertex-ai.html", "name": "inference.put_googlevertexai", "privileges": { "cluster": [ @@ -5309,7 +5264,6 @@ "visibility": "public" }, "stack": { ->>>>>>> b052219ca (Update doc_id URLs for inference APIs (#4127)) "since": "8.12.0", "stability": "stable", "visibility": "public" @@ -5317,7 +5271,7 @@ }, "description": "Create a Hugging Face inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* `multilingual-e5-base`\n* `multilingual-e5-small`\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-huggingface", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-hugging-face.html", "name": "inference.put_hugging_face", "privileges": { "cluster": [ @@ -5355,8 +5309,6 @@ "visibility": "public" }, "stack": { -<<<<<<< HEAD -======= "since": "8.18.0", "stability": "stable", "visibility": "public" @@ -5364,7 +5316,7 @@ }, "description": "Create a JinaAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter
creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-jinaai", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-jinaai.html", "name": "inference.put_jinaai", "privileges": { "cluster": [ @@ -5409,7 +5361,7 @@ }, "description": "Create a Mistral inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `mistral` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-mistral", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-mistral.html", "name": "inference.put_mistral", "privileges": { "cluster": [ @@ -5447,7 +5399,6 @@ "visibility": "public" }, "stack": { ->>>>>>> f7c35e7b1 (Add Mistral inference details (#3997)) "since": "8.12.0", "stability": "stable", "visibility": "public" @@ -5455,7 +5406,7 @@ }, "description": "Create an OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `openai` service.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "docId": "inference-api-put-openai", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-service-openai.html", "name": "inference.put_openai", "privileges": { "cluster": [ @@ -28150,11 +28101,9 @@ "kind": "properties", "properties": [ { -<<<<<<< HEAD -======= "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28166,9 +28115,6 @@ } }, { -<<<<<<< HEAD -<<<<<<< HEAD 
-======= "description": "The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`.", "name": "service", "required": true, @@ -28297,7 +28243,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28309,43 +28255,26 @@ } }, { ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "description": "The type of service supported for the specified task type. In this case, `amazonbedrock`.", -======= - "description": "The type of service supported for the specified task type. In this case, `azureopenai`.", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "name": "service", "required": true, "type": { "kind": "instance_of", "type": { "name": "ServiceType", -<<<<<<< HEAD "namespace": "inference.put_amazonbedrock" -======= - "namespace": "inference.put_azureopenai" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) } } }, { -<<<<<<< HEAD "description": "Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.", -======= - "description": "Settings used to install the inference model. These settings are specific to the `azureopenai` service.", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "name": "service_settings", "required": true, "type": { "kind": "instance_of", "type": { -<<<<<<< HEAD "name": "AmazonBedrockServiceSettings", "namespace": "inference.put_amazonbedrock" -======= - "name": "AzureOpenAIServiceSettings", - "namespace": "inference.put_azureopenai" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) } } }, @@ -28356,19 +28285,13 @@ "type": { "kind": "instance_of", "type": { -<<<<<<< HEAD "name": "AmazonBedrockTaskSettings", "namespace": "inference.put_amazonbedrock" -======= - "name": "AzureOpenAITaskSettings", - "namespace": "inference.put_azureopenai" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) } } } ] }, -<<<<<<< HEAD "description": "Create an Amazon Bedrock inference endpoint.\n\nCreates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { "PutAmazonBedrockRequestExample1": { @@ -28380,19 +28303,6 @@ "description": "Run `PUT _inference/completion/openai-completion` to create an inference endpoint to perform a completion task type.", "summary": "A completion task", "value": "{\n \"service\": \"openai\",\n \"service_settings\": {\n \"api_key\": \"OpenAI-API-Key\",\n \"model_id\": \"gpt-3.5-turbo\"\n }\n}" -======= - "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", - "examples": { - "PutAzureOpenAiRequestExample1": { - "description": "Run `PUT _inference/text_embedding/azure_openai_embeddings` to create an inference endpoint that performs a `text_embedding` task. 
You do not specify a model, as it is defined already in the Azure OpenAI deployment.", - "summary": "A text embedding task", - "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" - }, - "PutAzureOpenAiRequestExample2": { - "description": "Run `PUT _inference/completion/azure_openai_completion` to create an inference endpoint that performs a `completion` task.", - "summary": "A completion task", - "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) } }, "inherits": { @@ -28404,41 +28314,24 @@ "kind": "request", "name": { "name": "Request", -<<<<<<< HEAD "namespace": "inference.put_amazonbedrock" }, "path": [ { "description": "The type of the inference task that the model will perform.", -======= - "namespace": "inference.put_azureopenai" - }, - "path": [ - { - "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "name": "task_type", "required": true, "type": { "kind": "instance_of", "type": { -<<<<<<< HEAD "name": "AmazonBedrockTaskType", "namespace": "inference.put_amazonbedrock" -======= - "name": "AzureOpenAITaskType", - "namespace": "inference.put_azureopenai" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) } } }, { "description": "The unique identifier of the inference endpoint.", -<<<<<<< HEAD "name": "amazonbedrock_inference_id", -======= - "name": "azureopenai_inference_id", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "required": true, "type": { "kind": "instance_of", @@ -28450,11 +28343,7 @@ } ], "query": [], -<<<<<<< HEAD "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L28-L84" -======= - "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L27-L88" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) }, { "body": { @@ -28470,7 +28359,6 @@ "kind": "response", "name": { "name": "Response", -<<<<<<< HEAD "namespace": "inference.put_amazonbedrock" }, "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockResponse.ts#L22-L24" @@ -28485,7 +28373,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28609,7 +28497,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28739,7 +28627,137 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", + "name": "chunking_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "InferenceChunkingSettings", + "namespace": "inference._types" + } + } + }, + { + "description": "The type of service supported for the specified task type. In this case, `azureopenai`.", + "name": "service", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "ServiceType", + "namespace": "inference.put_azureopenai" + } + } + }, + { + "description": "Settings used to install the inference model. These settings are specific to the `azureopenai` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "AzureOpenAIServiceSettings", + "namespace": "inference.put_azureopenai" + } + } + }, + { + "description": "Settings to configure the inference task.\nThese settings are specific to the task type you specified.", + "name": "task_settings", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AzureOpenAITaskSettings", + "namespace": "inference.put_azureopenai" + } + } + } + ] + }, + "description": "Create an Azure OpenAI inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "examples": { + "PutAzureOpenAiRequestExample1": { + "description": "Run `PUT _inference/text_embedding/azure_openai_embeddings` to create an inference endpoint that performs a `text_embedding` task. 
You do not specify a model, as it is defined already in the Azure OpenAI deployment.", + "summary": "A text embedding task", + "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" + }, + "PutAzureOpenAiRequestExample2": { + "description": "Run `PUT _inference/completion/azure_openai_completion` to create an inference endpoint that performs a `completion` task.", + "summary": "A completion task", + "value": "{\n \"service\": \"azureopenai\",\n \"service_settings\": {\n \"api_key\": \"Api-Key\",\n \"resource_name\": \"Resource-name\",\n \"deployment_id\": \"Deployment-id\",\n \"api_version\": \"2024-02-01\"\n }\n}" + } + }, + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "kind": "request", + "name": { + "name": "Request", + "namespace": "inference.put_azureopenai" + }, + "path": [ + { + "description": "The type of the inference task that the model will perform.\nNOTE: The `chat_completion` task type only supports streaming and only through the _stream API.", + "name": "task_type", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "AzureOpenAITaskType", + "namespace": "inference.put_azureopenai" + } + } + }, + { + "description": "The unique identifier of the inference endpoint.", + "name": "azureopenai_inference_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [], + "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L27-L88" + }, + { + "body": { + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "InferenceEndpointInfo", + "namespace": "inference._types" + } + } + }, + "kind": "response", + "name": { + "name": "Response", + "namespace": "inference.put_azureopenai" + }, + "specLocation": "inference/put_azureopenai/PutAzureOpenAiResponse.ts#L22-L24" + }, + { + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ + { + "description": "The chunking configuration object.", + "extDocId": "inference-chunking", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28858,11 +28876,6 @@ "namespace": "inference.put_cohere" }, "specLocation": "inference/put_cohere/PutCohereResponse.ts#L22-L24" -======= - "namespace": "inference.put_azureopenai" - }, - "specLocation": "inference/put_azureopenai/PutAzureOpenAiResponse.ts#L22-L24" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) }, { "attachedBehaviors": [ @@ -28872,7 +28885,6 @@ "kind": "properties", "properties": [ { ->>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) "description": "The type of service supported for the specified task type. 
In this case, `elastic`.", "name": "service", "required": true, @@ -28967,7 +28979,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -28979,8 +28991,6 @@ } }, { -<<<<<<< HEAD -======= "description": "The type of service supported for the specified task type. In this case, `elasticsearch`.", "name": "service", "required": true, @@ -29125,7 +29135,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29253,7 +29263,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29366,7 +29376,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29496,7 +29506,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29508,7 +29518,6 @@ } }, { ->>>>>>> b052219ca (Update doc_id URLs for inference APIs (#4127)) "description": "The type of service supported for the specified task type. In this case, `hugging_face`.", "name": "service", "required": true, @@ -29610,7 +29619,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29622,8 +29631,6 @@ } }, { -<<<<<<< HEAD -======= "description": "The type of service supported for the specified task type. 
In this case, `jinaai`.", "name": "service", "required": true, @@ -29742,7 +29749,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29854,7 +29861,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -29866,7 +29873,6 @@ } }, { ->>>>>>> f7c35e7b1 (Add Mistral inference details (#3997)) "description": "The type of service supported for the specified task type. In this case, `openai`.", "name": "service", "required": true, @@ -29985,7 +29991,7 @@ { "description": "The chunking configuration object.", "extDocId": "inference-chunking", - "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/inference-apis.html#infer-chunking-config", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/inference-apis.html#infer-chunking-config", "name": "chunking_settings", "required": false, "type": { @@ -30140,7 +30146,7 @@ }, "description": "Create a Watsonx inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.\n\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", "examples": { - "InferenceRequestExample1": { + "PutWatsonxRequestExample1": { "description": "Run `PUT _inference/text_embedding/watsonx-embeddings` to create an Watonsx inference endpoint that performs a text embedding task.", "value": "{\n \"service\": \"watsonxai\",\n \"service_settings\": {\n \"api_key\": \"Watsonx-API-Key\", \n \"url\": \"Wastonx-URL\", \n \"model_id\": \"ibm/slate-30m-english-rtrvr\",\n \"project_id\": \"IBM-Cloud-ID\", \n \"api_version\": \"2024-03-14\"\n }\n}" } @@ -102669,8 +102675,6 @@ "kind": "enum", "members": [ { -<<<<<<< HEAD -======= "name": "completion" }, { @@ -102792,120 +102796,6 @@ { "name": "completion" }, - { - "name": "rerank" - }, - { - "name": "text_embedding" - } - ], - "name": { - "name": "CohereTaskType", - "namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L84-L88" - }, - { - "kind": "enum", - "members": [ - { - "name": "byte" - }, - { - "name": "float" - }, - { - "name": "int8" - } - ], - "name": { - "name": "EmbeddingType", - 
"namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L94-L98" - }, - { - "kind": "enum", - "members": [ - { - "name": "classification" - }, - { - "name": "clustering" - }, - { - "name": "ingest" - }, - { - "name": "search" - } - ], - "name": { - "name": "InputType", - "namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L100-L105" - }, - { - "kind": "enum", - "members": [ - { - "name": "cohere" - } - ], - "name": { - "name": "ServiceType", - "namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L90-L92" - }, - { - "kind": "enum", - "members": [ - { - "name": "cosine" - }, - { - "name": "dot_product" - }, - { - "name": "l2_norm" - } - ], - "name": { - "name": "SimilarityType", - "namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L107-L111" - }, - { - "kind": "enum", - "members": [ - { - "name": "END" - }, - { - "name": "NONE" - }, - { - "name": "START" - } - ], - "name": { - "name": "TruncateType", - "namespace": "inference.put_cohere" - }, - "specLocation": "inference/put_cohere/PutCohereRequest.ts#L113-L117" - }, - { - "kind": "enum", - "members": [ - { -<<<<<<< HEAD ->>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) -======= - "name": "completion" - }, { "name": "text_embedding" } @@ -102933,7 +102823,117 @@ "kind": "enum", "members": [ { ->>>>>>> d5b1a529a (Add Azure OpenAI inference details (#4019)) + "name": "completion" + }, + { + "name": "rerank" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "CohereTaskType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L84-L88" + }, + { + "kind": "enum", + "members": [ + { + "name": "byte" + }, + { + "name": "float" + }, + { + "name": "int8" + } + ], + "name": { + "name": "EmbeddingType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L94-L98" + }, + { + "kind": "enum", + "members": [ + { + "name": "classification" + }, + { + "name": "clustering" + }, + { + "name": "ingest" + }, + { + "name": "search" + } + ], + "name": { + "name": "InputType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L100-L105" + }, + { + "kind": "enum", + "members": [ + { + "name": "cohere" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L90-L92" + }, + { + "kind": "enum", + "members": [ + { + "name": "cosine" + }, + { + "name": "dot_product" + }, + { + "name": "l2_norm" + } + ], + "name": { + "name": "SimilarityType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L107-L111" + }, + { + "kind": "enum", + "members": [ + { + "name": "END" + }, + { + "name": "NONE" + }, + { + "name": "START" + } + ], + "name": { + "name": "TruncateType", + "namespace": "inference.put_cohere" + }, + "specLocation": "inference/put_cohere/PutCohereRequest.ts#L113-L117" + }, + { + "kind": "enum", + "members": [ + { "name": "chat_completion" } ], @@ -102956,6 +102956,122 @@ }, "specLocation": "inference/put_eis/PutEisRequest.ts#L68-L70" }, + { + "kind": "enum", + "members": [ + { + "name": "rerank" + }, + { + "name": "sparse_embedding" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "ElasticsearchTaskType", + "namespace": 
"inference.put_elasticsearch" + }, + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L88-L92" + }, + { + "kind": "enum", + "members": [ + { + "name": "elasticsearch" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_elasticsearch" + }, + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L94-L96" + }, + { + "kind": "enum", + "members": [ + { + "name": "sparse_embedding" + } + ], + "name": { + "name": "ElserTaskType", + "namespace": "inference.put_elser" + }, + "specLocation": "inference/put_elser/PutElserRequest.ts#L84-L86" + }, + { + "kind": "enum", + "members": [ + { + "name": "elser" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_elser" + }, + "specLocation": "inference/put_elser/PutElserRequest.ts#L88-L90" + }, + { + "kind": "enum", + "members": [ + { + "name": "completion" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "GoogleAiStudioTaskType", + "namespace": "inference.put_googleaistudio" + }, + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L77-L80" + }, + { + "kind": "enum", + "members": [ + { + "name": "googleaistudio" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_googleaistudio" + }, + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L82-L84" + }, + { + "kind": "enum", + "members": [ + { + "name": "rerank" + }, + { + "name": "text_embedding" + } + ], + "name": { + "name": "GoogleVertexAITaskType", + "namespace": "inference.put_googlevertexai" + }, + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L83-L86" + }, + { + "kind": "enum", + "members": [ + { + "name": "googlevertexai" + } + ], + "name": { + "name": "ServiceType", + "namespace": "inference.put_googlevertexai" + }, + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L88-L90" + }, { "kind": "enum", "members": [ @@ -102986,8 +103102,6 @@ "kind": "enum", "members": [ { -<<<<<<< HEAD -======= "name": "rerank" }, { @@ -103084,7 +103198,6 @@ "kind": "enum", "members": [ { ->>>>>>> f7c35e7b1 (Add Mistral inference details (#3997)) "name": "chat_completion" }, { @@ -123605,13 +123718,6 @@ { "kind": "interface", "name": { -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= -======= ->>>>>>> d5b1a529a (Add Azure OpenAI inference details (#4019)) -======= "name": "AlibabaCloudServiceSettings", "namespace": "inference.put_alibabacloud" }, @@ -123740,7 +123846,6 @@ { "kind": "interface", "name": { ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "name": "AmazonBedrockServiceSettings", "namespace": "inference.put_amazonbedrock" }, @@ -123774,17 +123879,6 @@ { "description": "The model provider for your deployment.\nNote that some providers may support only certain task types.\nSupported providers include:\n\n* `amazontitan` - available for `text_embedding` and `completion` task types\n* `anthropic` - available for `completion` task type only\n* `ai21labs` - available for `completion` task type only\n* `cohere` - available for `text_embedding` and `completion` task types\n* `meta` - available for `completion` task type only\n* `mistral` - available for `completion` task type only", "name": "provider", -======= - "name": "AzureOpenAIServiceSettings", - "namespace": "inference.put_azureopenai" - }, - "properties": [ - { - "description": "A valid API key for your Azure OpenAI account.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide 
both, you will receive an error when you try to create your model.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", - "extDocId": "azureopenai-auth", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", - "name": "api_key", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "required": false, "type": { "kind": "instance_of", @@ -123795,15 +123889,10 @@ } }, { -<<<<<<< HEAD "description": "The region that your model or ARN is deployed in.\nThe list of available regions per model can be found in the Amazon Bedrock documentation.", "extDocId": "amazonbedrock-models", "extDocUrl": "https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html", "name": "region", -======= - "description": "The Azure API version ID to use.\nIt is recommended to use the latest supported non-preview version.", - "name": "api_version", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "required": true, "type": { "kind": "instance_of", @@ -123814,41 +123903,7 @@ } }, { -<<<<<<< HEAD "description": "This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock.\nBy default, the `amazonbedrock` service sets the number of requests allowed per minute to 240.", -======= - "description": "The deployment name of your deployed models.\nYour Azure OpenAI deployments can be found though the Azure OpenAI Studio portal that is linked to your subscription.", - "extDocId": "azureopenai", - "extDocUrl": "https://oai.azure.com/", - "name": "deployment_id", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "A valid Microsoft Entra token.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.", - "extDocId": "azureopenai-auth", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", - "name": "entra_id", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "This setting helps to minimize the number of rate limit errors returned from Azure.\nThe `azureopenai` service sets a default number of requests allowed per minute depending on the task type.\nFor `text_embedding`, it is set to `1440`.\nFor `completion`, it is set to `120`.", - "extDocId": "azureopenai-quota-limits", - "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "name": "rate_limit", "required": false, "type": { @@ -123860,17 +123915,10 @@ } }, { -<<<<<<< HEAD "description": "A valid AWS secret key that is paired with the `access_key`.\nFor information about creating and managing access and secret keys, refer to the AWS documentation.", "extDocId": "amazonbedrock-secret-keys", "extDocUrl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html", "name": "secret_key", -======= - "description": "The name of your Azure OpenAI resource.\nYou can find this from the list of resources in the Azure Portal
for your subscription.", - "extDocId": "azureopenai-portal", - "extDocUrl": "https://portal.azure.com/#view/HubsExtension/BrowseAll", - "name": "resource_name", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "required": true, "type": { "kind": "instance_of", @@ -123881,41 +123929,11 @@ } } ], -<<<<<<< HEAD "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L95-L137" -======= - "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L99-L144" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) - }, - { - "kind": "interface", - "name": { -<<<<<<< HEAD - "name": "RateLimitSetting", - "namespace": "inference._types" - }, - "properties": [ - { - "description": "The number of requests allowed per minute.", - "name": "requests_per_minute", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "integer", - "namespace": "_types" - } - } - } - ], - "specLocation": "inference/_types/Services.ts#L95-L100" }, { "kind": "interface", "name": { -<<<<<<< HEAD -======= ->>>>>>> ef980f023 (Add Alibaba Cloud inference API (#4021)) "name": "AmazonBedrockTaskSettings", "namespace": "inference.put_amazonbedrock" }, @@ -124211,14 +124229,120 @@ }, { "description": "For a `text_embedding` task, specify the user issuing the request.\nThis information can be used for abuse detection.", -======= + "name": "user", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L136-L164" + }, + { + "kind": "interface", + "name": { + "name": "AzureOpenAIServiceSettings", + "namespace": "inference.put_azureopenai" + }, + "properties": [ + { + "description": "A valid API key for your Azure OpenAI account.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "extDocId": "azureopenai-auth", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", + "name": "api_key", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The Azure API version ID to use.\nIt is recommended to use the latest supported non-preview version.", + "name": "api_version", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The deployment name of your deployed models.\nYour Azure OpenAI deployments can be found though the Azure OpenAI Studio portal that is linked to your subscription.", + "extDocId": "azureopenai", + "extDocUrl": "https://oai.azure.com/", + "name": "deployment_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "A valid Microsoft Entra token.\nYou must specify either `api_key` or `entra_id`.\nIf you do not provide either or you provide both, you will receive an error when you try to create your model.", + 
"extDocId": "azureopenai-auth", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication", + "name": "entra_id", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Azure.\nThe `azureopenai` service sets a default number of requests allowed per minute depending on the task type.\nFor `text_embedding`, it is set to `1440`.\nFor `completion`, it is set to `120`.", + "extDocId": "azureopenai-quota-limits", + "extDocUrl": "https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } + }, + { + "description": "The name of your Azure OpenAI resource.\nYou can find this from the list of resources in the Azure Portal for your subscription.", + "extDocId": "azureopenai-portal", + "extDocUrl": "https://portal.azure.com/#view/HubsExtension/BrowseAll", + "name": "resource_name", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L99-L144" + }, + { + "kind": "interface", + "name": { "name": "AzureOpenAITaskSettings", "namespace": "inference.put_azureopenai" }, "properties": [ { "description": "For a `completion` or `text_embedding` task, specify the user issuing the request.\nThis information can be used for abuse detection.", ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) "name": "user", "required": false, "type": { @@ -124230,8 +124354,7 @@ } } ], -<<<<<<< HEAD - "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L136-L164" + "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L146-L152" }, { "kind": "interface", @@ -124363,14 +124486,10 @@ } ], "specLocation": "inference/put_cohere/PutCohereRequest.ts#L162-L194" -======= - "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L146-L152" ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) }, { "kind": "interface", "name": { ->>>>>>> f5eaaab24 (Add Amazon Bedrock inference API (#4022)) "name": "EisServiceSettings", "namespace": "inference.put_eis" }, @@ -124405,19 +124524,51 @@ { "kind": "interface", "name": { -<<<<<<< HEAD - "name": "RateLimitSetting", - "namespace": "inference._types" -======= -<<<<<<< HEAD "name": "ElasticsearchServiceSettings", "namespace": "inference.put_elasticsearch" ->>>>>>> d5b1a529a (Add Azure OpenAI inference details (#4019)) }, "properties": [ { - "description": "The number of requests allowed per minute.", - "name": "requests_per_minute", + "description": "Adaptive allocations configuration details.\nIf `enabled` is true, the number of allocations of the model is set based on the current load the process gets.\nWhen the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.\nWhen the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.\nIf `enabled` is true, do not set the number of allocations manually.", + "name": "adaptive_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AdaptiveAllocations", + "namespace": 
"inference.put_elasticsearch" + } + } + }, + { + "description": "The deployment identifier for a trained model deployment.\nWhen `deployment_id` is used the `model_id` is optional.", + "name": "deployment_id", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The name of the model to use for the inference task.\nIt can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client.", + "extDocId": "eland-import", + "extDocUrl": "https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-import-model.html#ml-nlp-import-script", + "name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The total number of allocations that are assigned to the model across machine learning nodes.\nIncreasing this value generally increases the throughput.\nIf adaptive allocations are enabled, do not set this value because it's automatically set.", + "name": "num_allocations", "required": false, "type": { "kind": "instance_of", @@ -124426,9 +124577,340 @@ "namespace": "_types" } } + }, + { + "description": "The number of threads used by each model allocation during inference.\nThis setting generally increases the speed per inference request.\nThe inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.\nThe value must be a power of 2.\nThe maximum value is 32.", + "name": "num_threads", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } } ], - "specLocation": "inference/_types/Services.ts#L95-L100" + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L117-L151" + }, + { + "kind": "interface", + "name": { + "name": "AdaptiveAllocations", + "namespace": "inference.put_elasticsearch" + }, + "properties": [ + { + "description": "Turn on `adaptive_allocations`.", + "name": "enabled", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "The maximum number of allocations to scale to.\nIf set, it must be greater than or equal to `min_number_of_allocations`.", + "name": "max_number_of_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "The minimum number of allocations to scale to.\nIf set, it must be greater than or equal to 0.\nIf not defined, the deployment scales to 0.", + "name": "min_number_of_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L98-L115" + }, + { + "kind": "interface", + "name": { + "name": "ElasticsearchTaskSettings", + "namespace": "inference.put_elasticsearch" + }, + "properties": [ + { + "description": "For a `rerank` task, return the document instead of only the index.", + "name": "return_documents", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": 
"inference/put_elasticsearch/PutElasticsearchRequest.ts#L153-L159" + }, + { + "kind": "interface", + "name": { + "name": "ElserServiceSettings", + "namespace": "inference.put_elser" + }, + "properties": [ + { + "description": "Adaptive allocations configuration details.\nIf `enabled` is true, the number of allocations of the model is set based on the current load the process gets.\nWhen the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.\nWhen the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.\nIf `enabled` is true, do not set the number of allocations manually.", + "name": "adaptive_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "AdaptiveAllocations", + "namespace": "inference.put_elser" + } + } + }, + { + "description": "The total number of allocations this model is assigned across machine learning nodes.\nIncreasing this value generally increases the throughput.\nIf adaptive allocations is enabled, do not set this value because it's automatically set.", + "name": "num_allocations", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "The number of threads used by each model allocation during inference.\nIncreasing this value generally increases the speed per inference request.\nThe inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.\nThe value must be a power of 2.\nThe maximum value is 32.\n\n> info\n> If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. 
If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1.", + "name": "num_threads", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_elser/PutElserRequest.ts#L111-L137" + }, + { + "kind": "interface", + "name": { + "name": "AdaptiveAllocations", + "namespace": "inference.put_elser" + }, + "properties": [ + { + "description": "Turn on `adaptive_allocations`.", + "name": "enabled", + "required": false, + "serverDefault": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "The maximum number of allocations to scale to.\nIf set, it must be greater than or equal to `min_number_of_allocations`.", + "name": "max_number_of_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "The minimum number of allocations to scale to.\nIf set, it must be greater than or equal to 0.\nIf not defined, the deployment scales to 0.", + "name": "min_number_of_allocations", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_elser/PutElserRequest.ts#L92-L109" + }, + { + "kind": "interface", + "name": { + "name": "GoogleAiStudioServiceSettings", + "namespace": "inference.put_googleaistudio" + }, + "properties": [ + { + "description": "A valid API key of your Google Gemini account.", + "name": "api_key", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The name of the model to use for the inference task.\nRefer to the Google documentation for the list of supported models.", + "extDocId": "googleaistudio-models", + "extDocUrl": "https://ai.google.dev/gemini-api/docs/models", + "name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Google AI Studio.\nBy default, the `googleaistudio` service sets the number of requests allowed per minute to 360.", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } + } + ], + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L86-L102" + }, + { + "kind": "interface", + "name": { + "name": "GoogleVertexAIServiceSettings", + "namespace": "inference.put_googlevertexai" + }, + "properties": [ + { + "description": "The name of the location to use for the inference task.\nRefer to the Google documentation for the list of supported locations.", + "extDocId": "googlevertexai-locations", + "extDocUrl": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations", + "name": "location", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The name of the model to use for the inference task.\nRefer to the Google documentation for the list of supported models.", + "extDocId": "googlevertexai-models", + "extDocUrl": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api", + 
"name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The name of the project to use for the inference task.", + "name": "project_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from Google Vertex AI.\nBy default, the `googlevertexai` service sets the number of requests allowed per minute to 30.000.", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } + }, + { + "description": "A valid service account in JSON format for the Google Vertex AI API.", + "name": "service_account_json", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L92-L118" + }, + { + "kind": "interface", + "name": { + "name": "GoogleVertexAITaskSettings", + "namespace": "inference.put_googlevertexai" + }, + "properties": [ + { + "description": "For a `text_embedding` task, truncate inputs longer than the maximum token length automatically.", + "name": "auto_truncate", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "For a `rerank` task, the number of the top N documents that should be returned.", + "name": "top_n", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L120-L129" }, { "kind": "interface", @@ -124481,8 +124963,6 @@ { "kind": "interface", "name": { -<<<<<<< HEAD -======= "name": "JinaAIServiceSettings", "namespace": "inference.put_jinaai" }, @@ -124591,11 +125071,6 @@ { "kind": "interface", "name": { -<<<<<<< HEAD -======= ->>>>>>> 52fce7a43 (Add Azure OpenAI details and examples) ->>>>>>> d5b1a529a (Add Azure OpenAI inference details (#4019)) -======= "name": "MistralServiceSettings", "namespace": "inference.put_mistral" }, @@ -124658,7 +125133,6 @@ { "kind": "interface", "name": { ->>>>>>> f7c35e7b1 (Add Mistral inference details (#3997)) "name": "OpenAIServiceSettings", "namespace": "inference.put_openai" }, @@ -125966,7 +126440,7 @@ { "description": "Trims whitespace from a field.\nIf the field is an array of strings, all members of the array will be trimmed.\nThis only works on leading and trailing whitespace.", "docId": "trim-processor", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/trim-processor.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/trim-processor.html", "name": "trim", "required": false, "type": { @@ -125980,7 +126454,7 @@ { "description": "Converts a string to its uppercase equivalent.\nIf the field is an array of strings, all members of the array will be converted.", "docId": "uppercase-processor", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/uppercase-processor.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/uppercase-processor.html", "name": "uppercase", "required": false, "type": { @@ -125994,7 +126468,7 @@ { "description": "URL-decodes a 
string.\nIf the field is an array of strings, all members of the array will be decoded.", "docId": "urldecode-processor", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/urldecode-processor.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/urldecode-processor.html", "name": "urldecode", "required": false, "type": { @@ -126021,7 +126495,7 @@ { "description": "The `user_agent` processor extracts details from the user agent string a browser sends with its web requests.\nThis processor adds this information by default under the `user_agent` field.", "docId": "user-agent-processor", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/user-agent-processor.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/user-agent-processor.html", "name": "user_agent", "required": false, "type": {