diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 49578a926..d387c62eb 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -306,10 +306,7 @@ client.create({ id, index }) - **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format. - **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. - **`document` (Optional, object)**: A document. -- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. -- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - **`include_source_on_error` (Optional, boolean)**: Whether to include the document source in the error message in case of parsing errors. -- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. @@ -2518,7 +2515,7 @@ client.cat.indices({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. -- **`health` (Optional, Enum("green" \| "yellow" \| "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`health` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. 
- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. @@ -2916,7 +2913,7 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. -- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason")[])**: A list of column names to display. It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` @@ -3524,7 +3521,7 @@ client.cluster.health({ ... }) - **`wait_for_nodes` (Optional, string \| number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. -- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. 
+- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. ## client.cluster.info [_cluster.info] Get cluster info. @@ -4574,8 +4571,9 @@ If false, the sequence query will return successfully, but will always have empt - **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. -- **`allow_no_indices` (Optional, boolean)** -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])** +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore a wildcard indices expression that resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search request execution. - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. ## client.esql.asyncQuery [_esql.async_query] @@ -6736,6 +6734,12 @@ Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. ## client.indices.refresh [_indices.refresh] Refresh an index. @@ -7452,6 +7456,7 @@ client.inference.put({ inference_id }) - **`inference_id` (string)**: The inference Id - **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type. Refer to the integration list in the API description for the available task types. 
- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAlibabacloud [_inference.put_alibabacloud] Create an AlibabaCloud AI Search inference endpoint. @@ -7474,6 +7479,7 @@ client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAmazonbedrock [_inference.put_amazonbedrock] Create an Amazon Bedrock inference endpoint. @@ -7499,6 +7505,7 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAnthropic [_inference.put_anthropic] Create an Anthropic inference endpoint. @@ -7522,6 +7529,7 @@ The only valid task type for the model to perform is `completion`. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAzureaistudio [_inference.put_azureaistudio] Create an Azure AI studio inference endpoint. @@ -7544,6 +7552,7 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putAzureopenai [_inference.put_azureopenai] Create an Azure OpenAI inference endpoint. @@ -7574,6 +7583,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putCohere [_inference.put_cohere] Create a Cohere inference endpoint. 
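The hunks above add the same optional `timeout` to every inference endpoint creation API. A minimal usage sketch against the generic `put` method, assuming a reachable cluster; the endpoint ID and the ELSER service settings are illustrative values, not part of this change:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create an inference endpoint and wait up to 60 seconds for it to be ready.
// `my-elser-endpoint` and the service settings are placeholder values.
await client.inference.put({
  task_type: 'sparse_embedding',
  inference_id: 'my-elser-endpoint',
  inference_config: {
    service: 'elser',
    service_settings: { num_allocations: 1, num_threads: 1 }
  },
  timeout: '60s'
})
```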
@@ -7597,6 +7607,7 @@ These settings are specific to the `cohere` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElasticsearch [_inference.put_elasticsearch] Create an Elasticsearch inference endpoint. @@ -7633,6 +7644,7 @@ The must not match the `model_id`. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElser [_inference.put_elser] Create an ELSER inference endpoint. @@ -7667,6 +7679,7 @@ client.inference.putElser({ task_type, elser_inference_id, service, service_sett - **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`. - **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGoogleaistudio [_inference.put_googleaistudio] Create a Google AI Studio inference endpoint. @@ -7687,6 +7700,7 @@ client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, ser - **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGooglevertexai [_inference.put_googlevertexai] Create a Google Vertex AI inference endpoint. @@ -7709,6 +7723,7 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putHuggingFace [_inference.put_hugging_face] Create a Hugging Face inference endpoint. 
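The provider-specific creation methods accept the same `timeout` option; a sketch using the Google AI Studio variant documented above, where the endpoint ID, API key, and model name are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Same timeout semantics on a provider-specific method: the request fails
// if the endpoint is not created within 30 seconds.
await client.inference.putGoogleaistudio({
  task_type: 'text_embedding',
  googleaistudio_inference_id: 'my-gemini-embeddings',
  service: 'googleaistudio',
  service_settings: {
    api_key: '<GOOGLE_AI_STUDIO_API_KEY>', // placeholder credential
    model_id: 'text-embedding-004' // placeholder model name
  },
  timeout: '30s'
})
```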
@@ -7743,6 +7758,7 @@ client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, - **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. - **`service_settings` ({ api_key, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putJinaai [_inference.put_jinaai] Create a JinaAI inference endpoint. @@ -7768,6 +7784,7 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putMistral [_inference.put_mistral] Create a Mistral inference endpoint. @@ -7789,6 +7806,7 @@ The only valid task type for the model to perform is `text_embedding`. - **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. - **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putOpenai [_inference.put_openai] Create an OpenAI inference endpoint. @@ -7812,6 +7830,7 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putVoyageai [_inference.put_voyageai] Create a VoyageAI inference endpoint. @@ -7836,6 +7855,7 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putWatsonx [_inference.put_watsonx] Create a Watsonx inference endpoint. @@ -7858,6 +7878,7 @@ The only valid task type for the model to perform is `text_embedding`. - **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. 
- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.rerank [_inference.rerank] Perform reranking inference on the service @@ -7924,6 +7945,7 @@ It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. - **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. ## client.inference.textEmbedding [_inference.text_embedding] Perform text embedding inference on the service diff --git a/src/api/api/create.ts b/src/api/api/create.ts index c63a5abf5..84d0fc832 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -38,10 +38,7 @@ const acceptedParams: Record extends RequestBase { * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. * If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName - /** Only perform the operation if the document has this primary term. */ - if_primary_term?: long - /** Only perform the operation if the document has this sequence number. */ - if_seq_no?: SequenceNumber /** Whether to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** Set to `create` to only index the document if it does not already exist (put if absent). - * If a document with the specified `_id` already exists, the indexing operation will fail. - * The behavior is the same as using the `/_create` endpoint. - * If a document ID is specified, this paramater defaults to `index`. - * Otherwise, it defaults to `create`. - * If the request targets a data stream, an `op_type` of `create` is required. */ - op_type?: OpType /** The ID of the pipeline to use to preprocess incoming documents. * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ @@ -338,9 +327,9 @@ export interface CreateRequest extends RequestBase { wait_for_active_shards?: WaitForActiveShards document?: TDocument /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase @@ -1030,7 +1019,7 @@ export interface HealthReportImpact { export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management' -export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' +export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' | 'unavailable' export interface HealthReportIndicatorNode { name: string | null @@ -3818,6 +3807,11 @@ export interface DocStats { * This number is based on documents in Lucene segments. * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */ deleted?: long + /** Returns the total size in bytes of all documents in these stats. + * This value may be more reliable than store_stats.size_in_bytes in estimating the index size. */ + total_size_in_bytes: long + /** Human-readable total_size_in_bytes */ + total_size?: ByteSize } export type Duration = string | -1 | 0 @@ -3915,6 +3909,7 @@ export interface FielddataStats { memory_size?: ByteSize memory_size_in_bytes: long fields?: Record + global_ordinals: GlobalOrdinalsStats } export type Fields = Field | Field[] @@ -3983,9 +3978,21 @@ export interface GetStats { total: long } +export interface GlobalOrdinalFieldStats { + build_time_in_millis: UnitMillis + build_time?: string + shard_max_value_count: long +} + +export interface GlobalOrdinalsStats { + build_time_in_millis: UnitMillis + build_time?: string + fields?: Record +} + export type GrokPattern = string -export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' +export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' | 'unknown' | 'unavailable' export type Host = string @@ -4507,7 +4514,6 @@ export interface SegmentsStats { fixed_bit_set_memory_in_bytes: long /** Total amount of memory used by all index writers across all shards assigned to selected nodes. 
*/ index_writer_memory?: ByteSize - index_writer_max_memory_in_bytes?: long /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory_in_bytes: long /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */ @@ -4524,15 +4530,16 @@ export interface SegmentsStats { points_memory?: ByteSize /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */ points_memory_in_bytes: long - stored_memory?: ByteSize /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ stored_fields_memory_in_bytes: long + /** Total amount of memory used for stored fields across all shards assigned to selected nodes. */ + stored_fields_memory?: ByteSize /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ terms_memory_in_bytes: long /** Total amount of memory used for terms across all shards assigned to selected nodes. */ terms_memory?: ByteSize /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */ - term_vectory_memory?: ByteSize + term_vectors_memory?: ByteSize /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ term_vectors_memory_in_bytes: long /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */ @@ -10229,7 +10236,7 @@ export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string -export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatNodeColumn[] +export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string @@ -16262,22 +16269,43 @@ export interface ClusterStatsCharFilterTypes { filter_types: ClusterStatsFieldTypes[] /** Contains statistics about tokenizer types used in selected nodes. */ tokenizer_types: ClusterStatsFieldTypes[] + /** Contains statistics about synonyms types used in selected nodes. */ + synonyms: Record } export interface ClusterStatsClusterFileSystem { + path?: string + mount?: string + type?: string /** Total number of bytes available to JVM in file stores across all selected nodes. * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`. * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ - available_in_bytes: long + available_in_bytes?: long + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`. 
+ * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ + available?: ByteSize + /** Total number, in bytes, of unallocated bytes in file stores across all selected nodes. */ + free_in_bytes?: long /** Total number of unallocated bytes in file stores across all selected nodes. */ - free_in_bytes: long + free?: ByteSize /** Total size, in bytes, of all file stores across all selected nodes. */ - total_in_bytes: long + total_in_bytes?: long + /** Total size of all file stores across all selected nodes. */ + total?: ByteSize + low_watermark_free_space?: ByteSize + low_watermark_free_space_in_bytes?: long + high_watermark_free_space?: ByteSize + high_watermark_free_space_in_bytes?: long + flood_stage_free_space?: ByteSize + flood_stage_free_space_in_bytes?: long + frozen_flood_stage_free_space?: ByteSize + frozen_flood_stage_free_space_in_bytes?: long } export interface ClusterStatsClusterIndices { /** Contains statistics about analyzers and analyzer components used in selected nodes. */ - analysis: ClusterStatsCharFilterTypes + analysis?: ClusterStatsCharFilterTypes /** Contains statistics about memory used for completion in selected nodes. */ completion: CompletionStats /** Total number of indices with shards assigned to selected nodes. */ @@ -16288,6 +16316,10 @@ export interface ClusterStatsClusterIndices { fielddata: FielddataStats /** Contains statistics about the query cache of selected nodes. */ query_cache: QueryCacheStats + /** Holds a snapshot of the search usage statistics. + * Used to hold the stats for a single node that's part of a ClusterStatsNodeResponse, as well as to + * accumulate stats for the entire cluster and return them as part of the ClusterStatsResponse. */ + search: ClusterStatsSearchUsageStats /** Contains statistics about segments in selected nodes. */ segments: SegmentsStats /** Contains statistics about indices with shards assigned to selected nodes. */ @@ -16295,9 +16327,13 @@ export interface ClusterStatsClusterIndices { /** Contains statistics about the size of shards assigned to selected nodes. */ store: StoreStats /** Contains statistics about field mappings in selected nodes. */ - mappings: ClusterStatsFieldTypesMappings + mappings?: ClusterStatsFieldTypesMappings /** Contains statistics about analyzers and analyzer components used in selected nodes. */ versions?: ClusterStatsIndicesVersions[] + /** Contains statistics about indexed dense vector */ + dense_vector: ClusterStatsDenseVectorStats + /** Contains statistics about indexed sparse vector */ + sparse_vector: ClusterStatsSparseVectorStats } export interface ClusterStatsClusterIndicesShards { @@ -16328,6 +16364,8 @@ export interface ClusterStatsClusterIngest { export interface ClusterStatsClusterJvm { /** Uptime duration, in milliseconds, since JVM last started. */ max_uptime_in_millis: DurationValue + /** Uptime duration since JVM last started. */ + max_uptime?: Duration /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory /** Number of active threads in use by JVM across all selected nodes. */ @@ -16339,8 +16377,12 @@ export interface ClusterStatsClusterJvm { export interface ClusterStatsClusterJvmMemory { /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long + /** Maximum amount of memory available for use by the heap across all selected nodes. */ + heap_max?: ByteSize /** Memory, in bytes, currently in use by the heap across all selected nodes. 
*/ heap_used_in_bytes: long + /** Memory currently in use by the heap across all selected nodes. */ + heap_used?: ByteSize } export interface ClusterStatsClusterJvmVersion { @@ -16369,20 +16411,22 @@ export interface ClusterStatsClusterNetworkTypes { } export interface ClusterStatsClusterNodeCount { - coordinating_only: integer - data: integer - data_cold: integer - data_content: integer - data_frozen?: integer - data_hot: integer - data_warm: integer - ingest: integer - master: integer - ml: integer - remote_cluster_client: integer total: integer - transform: integer - voting_only: integer + coordinating_only?: integer + data?: integer + data_cold?: integer + data_content?: integer + data_frozen?: integer + data_hot?: integer + data_warm?: integer + index?: integer + ingest?: integer + master?: integer + ml?: integer + remote_cluster_client?: integer + search?: integer + transform?: integer + voting_only?: integer } export interface ClusterStatsClusterNodes { @@ -16491,6 +16535,30 @@ export interface ClusterStatsClusterShardMetrics { min: double } +export interface ClusterStatsClusterSnapshotStats { + current_counts: ClusterStatsSnapshotCurrentCounts + repositories: Record +} + +export interface ClusterStatsDenseVectorOffHeapStats { + total_size_bytes: long + total_size?: ByteSize + total_veb_size_bytes: long + total_veb_size?: ByteSize + total_vec_size_bytes: long + total_vec_size?: ByteSize + total_veq_size_bytes: long + total_veq_size?: ByteSize + total_vex_size_bytes: long + total_vex_size?: ByteSize + fielddata?: Record> +} + +export interface ClusterStatsDenseVectorStats { + value_count: long + off_heap?: ClusterStatsDenseVectorOffHeapStats +} + export interface ClusterStatsFieldTypes { /** The name for the field type in selected nodes. */ name: Name @@ -16499,55 +16567,47 @@ export interface ClusterStatsFieldTypes { /** The number of indices containing the field type in selected nodes. */ index_count: integer /** For dense_vector field types, number of indexed vector types in selected nodes. */ - indexed_vector_count?: long + indexed_vector_count?: integer /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ - indexed_vector_dim_max?: long + indexed_vector_dim_max?: integer /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ - indexed_vector_dim_min?: long + indexed_vector_dim_min?: integer /** The number of fields that declare a script. */ script_count?: integer + /** For dense_vector field types, count of mappings by index type */ + vector_index_type_count?: Record + /** For dense_vector field types, count of mappings by similarity */ + vector_similarity_type_count?: Record + /** For dense_vector field types, count of mappings by element type */ + vector_element_type_count?: Record } export interface ClusterStatsFieldTypesMappings { /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[] /** Contains statistics about runtime field data types used in selected nodes. */ - runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + runtime_field_types: ClusterStatsRuntimeFieldTypes[] /** Total number of fields in all non-system indices. */ - total_field_count?: integer + total_field_count?: long /** Total number of fields in all non-system indices, accounting for mapping deduplication. 
*/ - total_deduplicated_field_count?: integer + total_deduplicated_field_count?: long /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long + /** Source mode usage count. */ + source_modes: Record } export interface ClusterStatsIndexingPressure { - memory: ClusterStatsIndexingPressureMemory -} - -export interface ClusterStatsIndexingPressureMemory { - current: ClusterStatsIndexingPressureMemorySummary - limit_in_bytes: long - total: ClusterStatsIndexingPressureMemorySummary -} - -export interface ClusterStatsIndexingPressureMemorySummary { - all_in_bytes: long - combined_coordinating_and_primary_in_bytes: long - coordinating_in_bytes: long - coordinating_rejections?: long - primary_in_bytes: long - primary_rejections?: long - replica_in_bytes: long - replica_rejections?: long + memory: NodesIndexingPressureMemory } export interface ClusterStatsIndicesVersions { index_count: integer primary_shard_count: integer total_primary_bytes: long + total_primary_size?: ByteSize version: VersionString } @@ -16563,18 +16623,33 @@ export interface ClusterStatsNodePackagingType { export interface ClusterStatsOperatingSystemMemoryInfo { /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total_in_bytes?: long + /** Total amount of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ + adjusted_total?: ByteSize /** Amount, in bytes, of free physical memory across all selected nodes. */ free_in_bytes: long + /** Amount of free physical memory across all selected nodes. */ + free?: ByteSize /** Percentage of free physical memory across all selected nodes. */ free_percent: integer /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long + /** Total amount of physical memory across all selected nodes. */ + total?: ByteSize /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long + /** Amount of physical memory in use across all selected nodes. */ + used?: ByteSize /** Percentage of physical memory in use across all selected nodes. */ used_percent: integer } +export interface ClusterStatsPerRepositoryStats { + type: string + oldest_start_time_millis: UnitMillis + oldest_start_time?: DateFormat + current_counts: ClusterStatsRepositoryStatsCurrentCounts +} + export interface ClusterStatsRemoteClusterInfo { /** The UUID of the remote cluster. */ cluster_uuid: string @@ -16583,7 +16658,7 @@ export interface ClusterStatsRemoteClusterInfo { /** The `skip_unavailable` setting used for this remote cluster. */ skip_unavailable: boolean /** Transport compression setting used for this remote cluster. */ - transport_compress: string + 'transport.compress': string /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus /** The list of Elasticsearch versions used by the nodes on the remote cluster. 
*/ @@ -16608,6 +16683,23 @@ export interface ClusterStatsRemoteClusterInfo { mem_total?: string } +export interface ClusterStatsRepositoryStatsCurrentCounts { + snapshots: integer + clones: integer + finalizations: integer + deletions: integer + snapshot_deletions: integer + active_deletions: integer + shards: ClusterStatsRepositoryStatsShards +} + +export interface ClusterStatsRepositoryStatsShards { + total: integer + complete: integer + incomplete: integer + states: Record +} + export interface ClusterStatsRequest extends RequestBase { /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds @@ -16656,6 +16748,33 @@ export interface ClusterStatsRuntimeFieldTypes { source_total: integer } +export interface ClusterStatsSearchUsageStats { + total: long + queries: Record + rescorers: Record + sections: Record + retrievers: Record +} + +export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL' + +export interface ClusterStatsSnapshotCurrentCounts { + /** Snapshots currently in progress */ + snapshots: integer + /** Incomplete shard snapshots */ + shard_snapshots: integer + /** Snapshots deletions in progress */ + snapshot_deletions: integer + /** Sum of snapshots and snapshot_deletions */ + concurrent_operations: integer + /** Cleanups in progress, not counted in concurrent_operations as they are not concurrent */ + cleanups: integer +} + +export interface ClusterStatsSparseVectorStats { + value_count: long +} + export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { /** Name of the cluster, based on the cluster name setting. */ cluster_name: Name @@ -16665,14 +16784,23 @@ export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { indices: ClusterStatsClusterIndices /** Contains statistics about nodes selected by the request’s node filters. */ nodes: ClusterStatsClusterNodes + /** Contains stats on repository feature usage exposed in cluster stats for telemetry. */ + repositories: Record> + /** Contains stats cluster snapshots. */ + snapshots: ClusterStatsClusterSnapshotStats /** Health status of the cluster, based on the state of its primary and replica shards. */ - status: HealthStatus + status?: HealthStatus /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */ timestamp: long /** Cross-cluster stats */ ccs: ClusterStatsCCSStats } +export interface ClusterStatsSynonymsStats { + count: integer + index_count: integer +} + export interface ConnectorConnector { api_key_id?: string api_key_secret_id?: string @@ -17665,8 +17793,12 @@ export interface EqlGetStatusResponse { export interface EqlSearchRequest extends RequestBase { /** The name of the index to scope the operation */ index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards + /** Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution */ + ccs_minimize_roundtrips?: boolean /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** EQL query you wish to run. 
*/ @@ -17704,9 +17836,9 @@ export interface EqlSearchRequest extends RequestBase { * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } } export type EqlSearchResponse = EqlEqlSearchResponseBase @@ -19693,6 +19825,10 @@ export interface IndicesDeleteRequest extends RequestBase { export type IndicesDeleteResponse = IndicesResponseBase +export interface IndicesDeleteAliasIndicesAliasesResponseBody extends AcknowledgedResponseBase { + errors?: boolean +} + export interface IndicesDeleteAliasRequest extends RequestBase { /** Comma-separated list of data streams or indices used to limit the request. * Supports wildcards (`*`). 
*/ @@ -19712,7 +19848,7 @@ export interface IndicesDeleteAliasRequest extends RequestBase { querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } } -export type IndicesDeleteAliasResponse = AcknowledgedResponseBase +export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody export interface IndicesDeleteDataLifecycleRequest extends RequestBase { /** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ @@ -20869,10 +21005,19 @@ export interface IndicesRecoveryRequest extends RequestBase { active_only?: boolean /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never } + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never } + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } } export type IndicesRecoveryResponse = Record @@ -22865,11 +23010,13 @@ export interface InferencePutRequest extends RequestBase { task_type?: InferenceTaskType /** The inference Id */ inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration inference_config?: InferenceInferenceEndpoint /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } } export type InferencePutResponse = InferenceInferenceEndpointInfo @@ -22879,6 +23026,8 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { task_type: InferenceAlibabaCloudTaskType /** The unique identifier of the inference endpoint. */ alibabacloud_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. 
*/ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ @@ -22889,9 +23038,9 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAlibabaCloudTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI @@ -22901,6 +23050,8 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { task_type: InferenceAmazonBedrockTaskType /** The unique identifier of the inference endpoint. */ amazonbedrock_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ @@ -22911,9 +23062,9 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAmazonBedrockTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock @@ -22924,6 +23075,8 @@ export interface InferencePutAnthropicRequest extends RequestBase { task_type: InferenceAnthropicTaskType /** The unique identifier of the inference endpoint. */ anthropic_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. 
*/ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `anthropic`. */ @@ -22934,9 +23087,9 @@ export interface InferencePutAnthropicRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAnthropicTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic @@ -22946,6 +23099,8 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { task_type: InferenceAzureAiStudioTaskType /** The unique identifier of the inference endpoint. */ azureaistudio_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `azureaistudio`. */ @@ -22956,9 +23111,9 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAzureAiStudioTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio @@ -22969,6 +23124,8 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { task_type: InferenceAzureOpenAITaskType /** The unique identifier of the inference endpoint. */ azureopenai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration /** The chunking configuration object. 
@@ -22969,6 +23124,8 @@ export interface InferencePutAzureopenaiRequest extends RequestBase {
   task_type: InferenceAzureOpenAITaskType
   /** The unique identifier of the inference endpoint. */
   azureopenai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `azureopenai`. */
@@ -22979,9 +23136,9 @@ export interface InferencePutAzureopenaiRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceAzureOpenAITaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI
@@ -22991,6 +23148,8 @@ export interface InferencePutCohereRequest extends RequestBase {
   task_type: InferenceCohereTaskType
   /** The unique identifier of the inference endpoint. */
   cohere_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `cohere`. */
@@ -23002,9 +23161,9 @@ export interface InferencePutCohereRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceCohereTaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere
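Note the shape of the `body` and `querystring` escape hatches: each newly accepted typed parameter is also added to their `never` intersections, which is what stops the same value from being passed twice through a catch-all. A small illustrative sketch (endpoint name, API key, and model are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.putCohere({
  task_type: 'text_embedding',
  cohere_inference_id: 'my-cohere-endpoint',
  timeout: '45s', // accepted here, as a typed top-level property
  service: 'cohere',
  service_settings: {
    api_key: '<cohere-api-key>', // placeholder
    model_id: 'embed-english-v3.0'
  }
  // querystring: { timeout: '45s' } // no longer compiles: `timeout?: never`
})
```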
@@ -23015,6 +23174,8 @@ export interface InferencePutElasticsearchRequest extends RequestBase {
   /** The unique identifier of the inference endpoint.
    * It must not match the `model_id`. */
   elasticsearch_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `elasticsearch`. */
@@ -23025,9 +23186,9 @@ export interface InferencePutElasticsearchRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceElasticsearchTaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch
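One constraint in this hunk is easy to skim past: for the `elasticsearch` service, the endpoint ID must not match the `model_id` it deploys. A hedged sketch using the built-in multilingual E5 model ID that the service documentation lists; the allocation settings are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.inference.putElasticsearch({
  task_type: 'text_embedding',
  // Must not equal service_settings.model_id below.
  elasticsearch_inference_id: 'my-e5-endpoint',
  timeout: '1m',
  service: 'elasticsearch',
  service_settings: {
    model_id: '.multilingual-e5-small',
    num_allocations: 1,
    num_threads: 1
  }
})
```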
@@ -23037,6 +23198,8 @@ export interface InferencePutElserRequest extends RequestBase {
   task_type: InferenceElserTaskType
   /** The unique identifier of the inference endpoint. */
   elser_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `elser`. */
@@ -23044,9 +23207,9 @@
   /** Settings used to install the inference model. These settings are specific to the `elser` service. */
   service_settings: InferenceElserServiceSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }
 export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER
@@ -23056,6 +23219,8 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase {
   task_type: InferenceGoogleAiStudioTaskType
   /** The unique identifier of the inference endpoint. */
   googleaistudio_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type.
    * In this case, `googleaistudio`. */
@@ -23063,9 +23228,9 @@
   /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */
   service_settings: InferenceGoogleAiStudioServiceSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }
 export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio
@@ -23075,6 +23240,8 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase {
   task_type: InferenceGoogleVertexAITaskType
   /** The unique identifier of the inference endpoint. */
   googlevertexai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `googlevertexai`. */
@@ -23085,9 +23252,9 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceGoogleVertexAITaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI
@@ -23097,6 +23264,8 @@ export interface InferencePutHuggingFaceRequest extends RequestBase {
   task_type: InferenceHuggingFaceTaskType
   /** The unique identifier of the inference endpoint. */
   huggingface_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `hugging_face`. */
@@ -23104,9 +23273,9 @@
   /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */
   service_settings: InferenceHuggingFaceServiceSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }
 export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace
@@ -23116,6 +23285,8 @@ export interface InferencePutJinaaiRequest extends RequestBase {
   task_type: InferenceJinaAITaskType
   /** The unique identifier of the inference endpoint. */
   jinaai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `jinaai`. */
@@ -23126,9 +23297,9 @@ export interface InferencePutJinaaiRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceJinaAITaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi
@@ -23139,6 +23310,8 @@ export interface InferencePutMistralRequest extends RequestBase {
   task_type: InferenceMistralTaskType
   /** The unique identifier of the inference endpoint. */
   mistral_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `mistral`. */
@@ -23146,9 +23319,9 @@
   /** Settings used to install the inference model. These settings are specific to the `mistral` service. */
   service_settings: InferenceMistralServiceSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never }
 }
 export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral
@@ -23159,6 +23332,8 @@ export interface InferencePutOpenaiRequest extends RequestBase {
   task_type: InferenceOpenAITaskType
   /** The unique identifier of the inference endpoint. */
   openai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `openai`. */
@@ -23169,9 +23344,9 @@ export interface InferencePutOpenaiRequest extends RequestBase {
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceOpenAITaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI
@@ -23181,6 +23356,8 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
   task_type: InferenceVoyageAITaskType
   /** The unique identifier of the inference endpoint. */
   voyageai_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The chunking configuration object. */
   chunking_settings?: InferenceInferenceChunkingSettings
   /** The type of service supported for the specified task type. In this case, `voyageai`. */
@@ -23191,9 +23368,9 @@ export interface InferencePutVoyageaiRequest extends RequestBase {
   /** Settings to configure the inference task.
    * These settings are specific to the task type you specified. */
   task_settings?: InferenceVoyageAITaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
 }
 export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI
@@ -23204,14 +23381,16 @@ export interface InferencePutWatsonxRequest extends RequestBase {
   task_type: InferenceWatsonxTaskType
   /** The unique identifier of the inference endpoint. */
   watsonx_inference_id: Id
+  /** Specifies the amount of time to wait for the inference endpoint to be created. */
+  timeout?: Duration
   /** The type of service supported for the specified task type. In this case, `watsonxai`. */
   service: InferenceWatsonxServiceType
   /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */
   service_settings: InferenceWatsonxServiceSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never }
+  body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never }
+  querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never }
 }
 export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx
@@ -23261,6 +23440,8 @@ export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult
 export interface InferenceStreamCompletionRequest extends RequestBase {
   /** The unique identifier for the inference endpoint. */
   inference_id: Id
+  /** The amount of time to wait for the inference request to complete. */
+  timeout?: Duration
   /** The text on which you want to perform the inference task.
    * It can be a single string or an array.
    *
@@ -23269,9 +23450,9 @@ export interface InferenceStreamCompletionRequest extends RequestBase {
   /** Optional task settings */
   task_settings?: InferenceTaskSettings
   /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never }
+  body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
   /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never }
+  querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never }
 }
 export type InferenceStreamCompletionResponse = StreamResult
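For `StreamCompletion` the new `timeout` bounds the inference request itself rather than endpoint creation. A sketch with a placeholder endpoint ID; the response is typed as `StreamResult`, that is, the raw event stream:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// 'my-chat-endpoint' is a placeholder for an existing completion endpoint.
const stream = await client.inference.streamCompletion({
  inference_id: 'my-chat-endpoint',
  input: 'Write a haiku about shard allocation.',
  timeout: '2m' // bounds this request, not endpoint creation
})
// `stream` is the raw StreamResult; consume it as a server-sent event stream.
```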
@@ -29849,6 +30030,8 @@ export interface NodesJvmMemoryStats {
   heap_committed_in_bytes?: long
   /** Maximum amount of memory, in bytes, available for use by the heap. */
   heap_max_in_bytes?: long
+  /** Maximum amount of memory available for use by the heap. */
+  heap_max?: ByteSize
   /** Non-heap memory used, in bytes. */
   non_heap_used_in_bytes?: long
   /** Amount of non-heap memory available, in bytes. */
@@ -29958,6 +30141,8 @@ export interface NodesPressureMemory {
   primary_rejections?: long
   /** Number of indexing requests rejected in the replica stage. */
   replica_rejections?: long
+  primary_document_rejections?: long
+  large_operation_rejections?: long
 }
 export interface NodesProcess {
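Two stats additions close the section: `heap_max` mirrors the existing `heap_max_in_bytes` as a human-readable `ByteSize`, and `NodesPressureMemory` gains two rejection counters that the diff leaves undocumented (upstream they appear to count document-level rejections in the primary stage and rejections of oversized operations; treat that reading as unverified here). A sketch of pulling both out of a nodes-stats response:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// ByteSize duplicates such as heap_max are typically only populated when
// human-readable output is requested (the `human` flag), so fall back to
// the stable numeric field.
const stats = await client.nodes.stats()

for (const [nodeId, node] of Object.entries(stats.nodes)) {
  const heapMax = node.jvm?.mem?.heap_max ?? node.jvm?.mem?.heap_max_in_bytes
  // Cumulative rejection counters live under indexing_pressure.memory.total.
  const total = node.indexing_pressure?.memory?.total
  console.log(nodeId, {
    heapMax,
    primaryDocRejections: total?.primary_document_rejections,
    largeOpRejections: total?.large_operation_rejections
  })
}
```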