diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json
index 601bbffb72..23677aea6a 100644
--- a/output/openapi/elasticsearch-openapi.json
+++ b/output/openapi/elasticsearch-openapi.json
@@ -17805,6 +17805,9 @@
         "parameters": [
           {
             "$ref": "#/components/parameters/inference.put-inference_id"
+          },
+          {
+            "$ref": "#/components/parameters/inference.put-timeout"
           }
         ],
         "requestBody": {
@@ -17902,6 +17905,9 @@
           },
           {
             "$ref": "#/components/parameters/inference.put-inference_id"
+          },
+          {
+            "$ref": "#/components/parameters/inference.put-timeout"
           }
         ],
         "requestBody": {
@@ -18015,6 +18021,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18111,6 +18127,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18197,6 +18223,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18277,6 +18313,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18363,6 +18409,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18449,6 +18505,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18535,6 +18601,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
@@ -18647,6 +18723,16 @@
               "$ref": "#/components/schemas/_types.Id"
             },
             "style": "simple"
+          },
+          {
+            "in": "query",
+            "name": "timeout",
+            "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+            "deprecated": false,
+            "schema": {
+              "$ref": "#/components/schemas/_types.Duration"
+            },
+            "style": "form"
           }
         ],
         "requestBody": {
"#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -18815,6 +18911,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -18901,6 +19007,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -18984,6 +19100,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -19070,6 +19196,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -19147,6 +19283,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -19233,6 +19379,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -19319,6 +19475,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -19578,6 +19744,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "The amount of time to wait for the inference request to complete.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -113187,6 +113363,16 @@ }, "style": "simple" }, + "inference.put-timeout": { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": 
"form" + }, "inference.update-inference_id": { "in": "path", "name": "inference_id", diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index e72456aa7d..77546d334f 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -9791,6 +9791,9 @@ "parameters": [ { "$ref": "#/components/parameters/inference.put-inference_id" + }, + { + "$ref": "#/components/parameters/inference.put-timeout" } ], "requestBody": { @@ -9888,6 +9891,9 @@ }, { "$ref": "#/components/parameters/inference.put-inference_id" + }, + { + "$ref": "#/components/parameters/inference.put-timeout" } ], "requestBody": { @@ -10001,6 +10007,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10097,6 +10113,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10183,6 +10209,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10263,6 +10299,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10349,6 +10395,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10435,6 +10491,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10521,6 +10587,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10633,6 +10709,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": 
"#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10723,6 +10809,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10801,6 +10897,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10887,6 +10993,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -10970,6 +11086,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -11056,6 +11182,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -11133,6 +11269,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -11219,6 +11365,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -11305,6 +11461,16 @@ "$ref": "#/components/schemas/_types.Id" }, "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" } ], "requestBody": { @@ -68010,6 +68176,16 @@ }, "style": "simple" }, + "inference.put-timeout": { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" + }, "ingest.get_pipeline-id": { "in": "path", "name": "id", diff --git a/output/schema/schema.json b/output/schema/schema.json index 9215f58453..4028d1ad1c 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ 
@@ -167289,8 +167289,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put/PutRequest.ts#L25-L78"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put/PutRequest.ts#L26-L86"
     },
     {
       "kind": "response",
@@ -167537,8 +167551,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L30-L77"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_alibabacloud/PutAlibabaCloudRequest.ts#L31-L85"
     },
     {
       "kind": "response",
@@ -167721,8 +167749,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L30-L80"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_amazonbedrock/PutAmazonBedrockRequest.ts#L31-L88"
     },
     {
       "kind": "response",
@@ -167872,8 +167914,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_anthropic/PutAnthropicRequest.ts#L30-L78"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_anthropic/PutAnthropicRequest.ts#L31-L86"
     },
     {
       "kind": "response",
@@ -168056,8 +168112,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L30-L77"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_azureaistudio/PutAzureAiStudioRequest.ts#L31-L85"
     },
     {
       "kind": "response",
@@ -168240,8 +168310,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L30-L85"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_azureopenai/PutAzureOpenAiRequest.ts#L31-L93"
     },
     {
       "kind": "response",
@@ -168424,8 +168508,22 @@
           }
         }
       ],
-      "query": [],
-      "specLocation": "inference/put_cohere/PutCohereRequest.ts#L30-L78"
+      "query": [
+        {
+          "description": "Specifies the amount of time to wait for the inference endpoint to be created.",
+          "name": "timeout",
+          "required": false,
+          "serverDefault": "30s",
+          "type": {
+            "kind": "instance_of",
+            "type": {
+              "name": "Duration",
+              "namespace": "_types"
+            }
+          }
+        }
+      ],
+      "specLocation": "inference/put_cohere/PutCohereRequest.ts#L31-L86"
     },
     {
       "kind": "response",
"specLocation": "inference/put_cohere/PutCohereRequest.ts#L31-L86" }, { "kind": "response", @@ -168736,8 +168834,22 @@ } } ], - "query": [], - "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L30-L91" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_elasticsearch/PutElasticsearchRequest.ts#L31-L99" }, { "kind": "response", @@ -168918,8 +169030,22 @@ } } ], - "query": [], - "specLocation": "inference/put_elser/PutElserRequest.ts#L29-L86" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_elser/PutElserRequest.ts#L30-L94" }, { "kind": "response", @@ -169064,8 +169190,22 @@ } } ], - "query": [], - "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L29-L71" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_googleaistudio/PutGoogleAiStudioRequest.ts#L30-L79" }, { "kind": "response", @@ -169248,8 +169388,22 @@ } } ], - "query": [], - "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L30-L77" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_googlevertexai/PutGoogleVertexAiRequest.ts#L31-L85" }, { "kind": "response", @@ -169420,8 +169574,22 @@ } } ], - "query": [], - "specLocation": "inference/put_hugging_face/PutHuggingFaceRequest.ts#L29-L85" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_hugging_face/PutHuggingFaceRequest.ts#L30-L93" }, { "kind": "response", @@ -169604,8 +169772,22 @@ } } ], - "query": [], - "specLocation": "inference/put_jinaai/PutJinaAiRequest.ts#L30-L80" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_jinaai/PutJinaAiRequest.ts#L31-L88" }, { "kind": "response", @@ -169743,8 +169925,22 @@ } } ], - "query": [], - "specLocation": "inference/put_mistral/PutMistralRequest.ts#L29-L72" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", 
+ "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_mistral/PutMistralRequest.ts#L30-L80" }, { "kind": "response", @@ -169927,8 +170123,22 @@ } } ], - "query": [], - "specLocation": "inference/put_openai/PutOpenAiRequest.ts#L30-L78" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_openai/PutOpenAiRequest.ts#L31-L86" }, { "kind": "response", @@ -170111,8 +170321,22 @@ } } ], - "query": [], - "specLocation": "inference/put_voyageai/PutVoyageAIRequest.ts#L30-L79" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_voyageai/PutVoyageAIRequest.ts#L31-L87" }, { "kind": "response", @@ -170236,8 +170460,22 @@ } } ], - "query": [], - "specLocation": "inference/put_watsonx/PutWatsonxRequest.ts#L28-L68" + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_watsonx/PutWatsonxRequest.ts#L29-L76" }, { "kind": "response", @@ -170741,8 +170979,22 @@ } } ], - "query": [], - "specLocation": "inference/stream_completion/StreamInferenceRequest.ts#L24-L63" + "query": [ + { + "description": "The amount of time to wait for the inference request to complete.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/stream_completion/StreamInferenceRequest.ts#L25-L71" }, { "kind": "response", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 43125a06a8..653ed61656 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -13916,6 +13916,7 @@ export type InferenceInferenceResponse = InferenceInferenceResult export interface InferencePutRequest extends RequestBase { task_type?: InferenceTaskType inference_id: Id + timeout?: Duration body?: InferenceInferenceEndpoint } @@ -13924,6 +13925,7 @@ export type InferencePutResponse = InferenceInferenceEndpointInfo export interface InferencePutAlibabacloudRequest extends RequestBase { task_type: InferenceAlibabaCloudTaskType alibabacloud_inference_id: Id + timeout?: Duration body?: { chunking_settings?: InferenceInferenceChunkingSettings service: InferenceAlibabaCloudServiceType @@ -13937,6 +13939,7 @@ export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAli export interface InferencePutAmazonbedrockRequest extends RequestBase { task_type: InferenceAmazonBedrockTaskType amazonbedrock_inference_id: Id + timeout?: Duration body?: { chunking_settings?: InferenceInferenceChunkingSettings service: InferenceAmazonBedrockServiceType @@ -13950,6 +13953,7 @@ export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAm export interface 
InferencePutAnthropicRequest extends RequestBase {
   task_type: InferenceAnthropicTaskType
   anthropic_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceAnthropicServiceType
@@ -13963,6 +13967,7 @@ export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthro
 export interface InferencePutAzureaistudioRequest extends RequestBase {
   task_type: InferenceAzureAiStudioTaskType
   azureaistudio_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceAzureAiStudioServiceType
@@ -13976,6 +13981,7 @@ export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAz
 export interface InferencePutAzureopenaiRequest extends RequestBase {
   task_type: InferenceAzureOpenAITaskType
   azureopenai_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceAzureOpenAIServiceType
@@ -13989,6 +13995,7 @@ export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzur
 export interface InferencePutCohereRequest extends RequestBase {
   task_type: InferenceCohereTaskType
   cohere_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceCohereServiceType
@@ -14002,6 +14009,7 @@ export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
 export interface InferencePutElasticsearchRequest extends RequestBase {
   task_type: InferenceElasticsearchTaskType
   elasticsearch_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceElasticsearchServiceType
@@ -14015,6 +14023,7 @@ export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoEl
 export interface InferencePutElserRequest extends RequestBase {
   task_type: InferenceElserTaskType
   elser_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceElserServiceType
@@ -14027,6 +14036,7 @@ export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
 export interface InferencePutGoogleaistudioRequest extends RequestBase {
   task_type: InferenceGoogleAiStudioTaskType
   googleaistudio_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceGoogleAiServiceType
@@ -14039,6 +14049,7 @@ export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoG
 export interface InferencePutGooglevertexaiRequest extends RequestBase {
   task_type: InferenceGoogleVertexAITaskType
   googlevertexai_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceGoogleVertexAIServiceType
@@ -14052,6 +14063,7 @@ export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoG
 export interface InferencePutHuggingFaceRequest extends RequestBase {
   task_type: InferenceHuggingFaceTaskType
   huggingface_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceHuggingFaceServiceType
@@ -14064,6 +14076,7 @@ export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHugg
 export interface InferencePutJinaaiRequest extends RequestBase {
   task_type: InferenceJinaAITaskType
   jinaai_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceJinaAIServiceType
@@ -14077,6 +14090,7 @@ export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi
 export interface InferencePutMistralRequest extends RequestBase {
   task_type: InferenceMistralTaskType
   mistral_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceMistralServiceType
@@ -14089,6 +14103,7 @@ export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
 export interface InferencePutOpenaiRequest extends RequestBase {
   task_type: InferenceOpenAITaskType
   openai_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceOpenAIServiceType
@@ -14102,6 +14117,7 @@ export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
 export interface InferencePutVoyageaiRequest extends RequestBase {
   task_type: InferenceVoyageAITaskType
   voyageai_inference_id: Id
+  timeout?: Duration
   body?: {
     chunking_settings?: InferenceInferenceChunkingSettings
     service: InferenceVoyageAIServiceType
@@ -14115,6 +14131,7 @@ export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageA
 export interface InferencePutWatsonxRequest extends RequestBase {
   task_type: InferenceWatsonxTaskType
   watsonx_inference_id: Id
+  timeout?: Duration
   body?: {
     service: InferenceWatsonxServiceType
     service_settings: InferenceWatsonxServiceSettings
@@ -14148,6 +14165,7 @@ export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInference
 
 export interface InferenceStreamCompletionRequest extends RequestBase {
   inference_id: Id
+  timeout?: Duration
   body?: {
     input: string | string[]
     task_settings?: InferenceTaskSettings
diff --git a/specification/inference/put/PutRequest.ts b/specification/inference/put/PutRequest.ts
index 36eb3e88cb..8f609d35e9 100644
--- a/specification/inference/put/PutRequest.ts
+++ b/specification/inference/put/PutRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import { InferenceEndpoint } from '@inference/_types/Services'
 import { TaskType } from '@inference/_types/TaskType'
 
@@ -73,6 +74,13 @@ export interface Request extends RequestBase {
      */
     inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   /** @codegen_name inference_config */
   body: InferenceEndpoint
 }
diff --git a/specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts b/specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts
index f390250bbd..c725397056 100644
--- a/specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts
+++ b/specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   AlibabaCloudServiceSettings,
   AlibabaCloudServiceType,
@@ -54,6 +55,13 @@ export interface Request extends RequestBase {
      */
     alibabacloud_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
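Note: the net effect of the spec change above is that `PUT /_inference/{inference_id}` (and the task-type path variants) now accepts a `timeout` query parameter, defaulting to `30s`. A minimal sketch of the raw REST call — the node URL, API key, endpoint id, service name, and `service_settings` contents here are placeholders, not part of this change:

```ts
// Sketch only: create an inference endpoint, waiting up to 60s instead of the 30s default.
const res = await fetch('http://localhost:9200/_inference/my-endpoint?timeout=60s', {
  method: 'PUT',
  headers: {
    Authorization: `ApiKey ${process.env.ES_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    service: 'alibabacloud-ai-search', // any supported service
    service_settings: {
      // provider credentials and model settings go here (omitted in this sketch)
    },
  }),
})
if (!res.ok) throw new Error(`Endpoint creation failed: ${res.status}`)
console.log(await res.json())
```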
diff --git a/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts b/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts
index 0420c26263..61927ce3bf 100644
--- a/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts
+++ b/specification/inference/put_amazonbedrock/PutAmazonBedrockRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   AmazonBedrockServiceSettings,
   AmazonBedrockServiceType,
@@ -57,6 +58,13 @@ export interface Request extends RequestBase {
      */
     amazonbedrock_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_anthropic/PutAnthropicRequest.ts b/specification/inference/put_anthropic/PutAnthropicRequest.ts
index d0e0b87ed3..d7942495c3 100644
--- a/specification/inference/put_anthropic/PutAnthropicRequest.ts
+++ b/specification/inference/put_anthropic/PutAnthropicRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   AnthropicServiceSettings,
   AnthropicServiceType,
@@ -55,6 +56,13 @@ export interface Request extends RequestBase {
      */
     anthropic_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_azureaistudio/PutAzureAiStudioRequest.ts b/specification/inference/put_azureaistudio/PutAzureAiStudioRequest.ts
index d09f31b75c..6ab0d8b029 100644
--- a/specification/inference/put_azureaistudio/PutAzureAiStudioRequest.ts
+++ b/specification/inference/put_azureaistudio/PutAzureAiStudioRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   AzureAiStudioServiceSettings,
   AzureAiStudioServiceType,
@@ -54,6 +55,13 @@ export interface Request extends RequestBase {
      */
     azureaistudio_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts b/specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts
index 63f0c42a8b..af881b68ef 100644
--- a/specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts
+++ b/specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   AzureOpenAIServiceSettings,
   AzureOpenAIServiceType,
@@ -62,6 +63,13 @@ export interface Request extends RequestBase {
      */
     azureopenai_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
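Note: the provider-specific requests above (Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI) expose the same parameter on the `PUT /_inference/{task_type}/{inference_id}` path form. Since the generated request types now carry `timeout?: Duration`, clients regenerated from this spec should accept it directly. A hypothetical sketch with the JavaScript client (hypothetical until clients are regenerated; the endpoint id and `service_settings` keys are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200', auth: { apiKey: '...' } })

await client.inference.put({
  task_type: 'completion',
  inference_id: 'my-azure-openai-completion',
  timeout: '90s', // would serialize as the ?timeout= query parameter
  inference_config: {
    service: 'azureopenai',
    service_settings: {
      // api_key, resource_name, deployment_id, api_version — placeholders
    },
  },
})
```

(`inference_config` is the body's codegen name, per the `@codegen_name` annotation in PutRequest.ts above.)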
diff --git a/specification/inference/put_cohere/PutCohereRequest.ts b/specification/inference/put_cohere/PutCohereRequest.ts
index 52ddd382e7..6ebfc47292 100644
--- a/specification/inference/put_cohere/PutCohereRequest.ts
+++ b/specification/inference/put_cohere/PutCohereRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   CohereServiceSettings,
   CohereServiceType,
@@ -54,6 +55,13 @@ export interface Request extends RequestBase {
      */
     cohere_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_elasticsearch/PutElasticsearchRequest.ts b/specification/inference/put_elasticsearch/PutElasticsearchRequest.ts
index bdf2a8d991..b57b062f67 100644
--- a/specification/inference/put_elasticsearch/PutElasticsearchRequest.ts
+++ b/specification/inference/put_elasticsearch/PutElasticsearchRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   ElasticsearchServiceSettings,
   ElasticsearchServiceType,
@@ -68,6 +69,13 @@ export interface Request extends RequestBase {
      */
     elasticsearch_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_elser/PutElserRequest.ts b/specification/inference/put_elser/PutElserRequest.ts
index d9a4812243..179bb1e379 100644
--- a/specification/inference/put_elser/PutElserRequest.ts
+++ b/specification/inference/put_elser/PutElserRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   ElserServiceSettings,
   ElserServiceType,
@@ -68,6 +69,13 @@ export interface Request extends RequestBase {
      */
     elser_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_googleaistudio/PutGoogleAiStudioRequest.ts b/specification/inference/put_googleaistudio/PutGoogleAiStudioRequest.ts
index 691710a32e..6871ceb750 100644
--- a/specification/inference/put_googleaistudio/PutGoogleAiStudioRequest.ts
+++ b/specification/inference/put_googleaistudio/PutGoogleAiStudioRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   GoogleAiServiceType,
   GoogleAiStudioServiceSettings,
@@ -53,6 +54,13 @@ export interface Request extends RequestBase {
      */
     googleaistudio_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
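Note: for the `elasticsearch` and `elser` services above, endpoint creation can involve allocating a trained model, which may take longer than the `30s` default — the most obvious use case for this parameter. A sketch, assuming a local cluster and ELSER's documented `num_allocations`/`num_threads` service settings:

```ts
// Sketch: wait up to 2 minutes for an ELSER sparse-embedding endpoint to be created.
const res = await fetch(
  'http://localhost:9200/_inference/sparse_embedding/my-elser?timeout=2m',
  {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      service: 'elser',
      service_settings: { num_allocations: 1, num_threads: 1 },
    }),
  },
)
console.log(res.status, await res.json())
```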
diff --git a/specification/inference/put_googlevertexai/PutGoogleVertexAiRequest.ts b/specification/inference/put_googlevertexai/PutGoogleVertexAiRequest.ts
index 40a65dbb56..80cf04059e 100644
--- a/specification/inference/put_googlevertexai/PutGoogleVertexAiRequest.ts
+++ b/specification/inference/put_googlevertexai/PutGoogleVertexAiRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   GoogleVertexAIServiceSettings,
   GoogleVertexAIServiceType,
@@ -54,6 +55,13 @@ export interface Request extends RequestBase {
      */
     googlevertexai_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_hugging_face/PutHuggingFaceRequest.ts b/specification/inference/put_hugging_face/PutHuggingFaceRequest.ts
index 9c2b4855a7..1fa3113891 100644
--- a/specification/inference/put_hugging_face/PutHuggingFaceRequest.ts
+++ b/specification/inference/put_hugging_face/PutHuggingFaceRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   HuggingFaceServiceSettings,
   HuggingFaceServiceType,
@@ -67,6 +68,13 @@ export interface Request extends RequestBase {
      */
     huggingface_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_jinaai/PutJinaAiRequest.ts b/specification/inference/put_jinaai/PutJinaAiRequest.ts
index c34b80d4a4..6685c2c874 100644
--- a/specification/inference/put_jinaai/PutJinaAiRequest.ts
+++ b/specification/inference/put_jinaai/PutJinaAiRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   JinaAIServiceSettings,
   JinaAIServiceType,
@@ -57,6 +58,13 @@ export interface Request extends RequestBase {
      */
     jinaai_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_mistral/PutMistralRequest.ts b/specification/inference/put_mistral/PutMistralRequest.ts
index 0bc9713a87..e7f22db73a 100644
--- a/specification/inference/put_mistral/PutMistralRequest.ts
+++ b/specification/inference/put_mistral/PutMistralRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   MistralServiceSettings,
   MistralServiceType,
@@ -54,6 +55,13 @@ export interface Request extends RequestBase {
      */
     mistral_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_openai/PutOpenAiRequest.ts b/specification/inference/put_openai/PutOpenAiRequest.ts
index d322b89c36..b6bb675f46 100644
--- a/specification/inference/put_openai/PutOpenAiRequest.ts
+++ b/specification/inference/put_openai/PutOpenAiRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   OpenAIServiceSettings,
   OpenAIServiceType,
@@ -55,6 +56,13 @@ export interface Request extends RequestBase {
      */
     openai_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_voyageai/PutVoyageAIRequest.ts b/specification/inference/put_voyageai/PutVoyageAIRequest.ts
index 9d1c2e0006..2bb4b7ef46 100644
--- a/specification/inference/put_voyageai/PutVoyageAIRequest.ts
+++ b/specification/inference/put_voyageai/PutVoyageAIRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   VoyageAIServiceSettings,
   VoyageAIServiceType,
@@ -56,6 +57,13 @@ export interface Request extends RequestBase {
      */
     voyageai_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The chunking configuration object.
diff --git a/specification/inference/put_watsonx/PutWatsonxRequest.ts b/specification/inference/put_watsonx/PutWatsonxRequest.ts
index f7f80f5a81..4e51746776 100644
--- a/specification/inference/put_watsonx/PutWatsonxRequest.ts
+++ b/specification/inference/put_watsonx/PutWatsonxRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import {
   WatsonxServiceSettings,
   WatsonxServiceType,
@@ -55,6 +56,13 @@ export interface Request extends RequestBase {
      */
     watsonx_inference_id: Id
   }
+  query_parameters: {
+    /**
+     * Specifies the amount of time to wait for the inference endpoint to be created.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The type of service supported for the specified task type. In this case, `watsonxai`.
diff --git a/specification/inference/stream_completion/StreamInferenceRequest.ts b/specification/inference/stream_completion/StreamInferenceRequest.ts
index 1d2c83bee9..0e08af6a6f 100644
--- a/specification/inference/stream_completion/StreamInferenceRequest.ts
+++ b/specification/inference/stream_completion/StreamInferenceRequest.ts
@@ -19,6 +19,7 @@
 
 import { RequestBase } from '@_types/Base'
 import { Id } from '@_types/common'
+import { Duration } from '@_types/Time'
 import { TaskSettings } from '@inference/_types/Services'
 
 /**
@@ -47,6 +48,13 @@ export interface Request extends RequestBase {
      */
     inference_id: Id
   }
+  query_parameters: {
+    /**
+     * The amount of time to wait for the inference request to complete.
+     * @server_default 30s
+     */
+    timeout?: Duration
+  }
   body: {
     /**
      * The text on which you want to perform the inference task.
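Note: the `stream_completion` change is the one outlier — its `timeout` bounds the inference request itself ("the amount of time to wait for the inference request to complete"), not endpoint creation. A sketch of consuming the stream, assuming the `/_stream` URL form, a Node runtime, and a placeholder endpoint id:

```ts
// Sketch: stream a completion, bounding the whole request at 2 minutes.
const res = await fetch(
  'http://localhost:9200/_inference/completion/my-llm/_stream?timeout=2m',
  {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ input: 'Write a haiku about timeouts.' }),
  },
)
const reader = res.body!.getReader()
const decoder = new TextDecoder()
for (;;) {
  const { done, value } = await reader.read()
  if (done) break
  process.stdout.write(decoder.decode(value)) // server-sent event chunks
}
```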