From 66701570fb4280912bdd223e0d03219435a9ddf1 Mon Sep 17 00:00:00 2001
From: Elastic Machine 
Date: Mon, 7 Jul 2025 06:04:15 +0000
Subject: [PATCH] Auto-generated API code

---
 docs/reference/api-reference.md | 166 +++++++++++-------
 src/api/api/indices.ts | 126 +++++++++++++-
 src/api/api/inference.ts | 126 +++++++++++++-
 src/api/api/rollup.ts | 2 +-
 src/api/api/synonyms.ts | 2 +-
 src/api/api/update.ts | 2 +-
 src/api/api/update_by_query.ts | 2 +-
 src/api/types.ts | 292 ++++++++++++++++++++++++++------
 8 files changed, 589 insertions(+), 129 deletions(-)

diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md
index 681b7b0d0..323a5a73b 100644
--- a/docs/reference/api-reference.md
+++ b/docs/reference/api-reference.md
@@ -1857,6 +1857,7 @@ The document must still be reindexed, but using this API removes some network ro
 The `_source` field must be enabled to use this API.
 In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
+For usage examples such as partial updates, upserts, and scripted updates, see the External documentation.

 [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update)

@@ -1914,6 +1915,30 @@ A bulk update request is performed for each batch of matching documents.
 Any query or update failures cause the update by query request to fail and the failures are shown in the response.
 Any update requests that completed successfully still stick; they are not rolled back.

+**Refreshing shards**
+
+Specifying the `refresh` parameter refreshes all shards once the request completes.
+This is different from the update API's `refresh` parameter, which causes only the shard
+that received the request to be refreshed. Unlike the update API, it does not support
+`wait_for`.
+
+**Running update by query asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch
+performs some preflight checks, launches the request, and returns a
+[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.
+Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
+
+**Waiting for active shards**
+
+`wait_for_active_shards` controls how many copies of a shard must be active
+before proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)
+for details. `timeout` controls how long each write request waits for unavailable
+shards to become available. Both work exactly the way they work in the
+[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also
+specify the `scroll` parameter to control how long it keeps the search context
+alive, for example `?scroll=10m`. The default is 5 minutes.
+
 **Throttling update requests**

 To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
@@ -1958,22 +1983,7 @@ If you're slicing manually or otherwise tuning automatic slicing, keep in mind t
 * Update performance scales linearly across available resources with the number of slices.

 Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
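As a concrete illustration, here is a minimal sketch of launching an asynchronous, auto-sliced update by query from this client and then polling its task. The index name, query, and script are illustrative placeholders, not part of the API:

```ts
// Launch the operation without waiting for it to finish; with
// wait_for_completion: false the response carries a task ID instead
// of the usual result body.
const { task } = await client.updateByQuery({
  index: 'my-index-000001', // placeholder index name
  conflicts: 'proceed', // count version conflicts instead of aborting
  slices: 'auto', // let Elasticsearch choose the number of slices
  wait_for_completion: false,
  query: { term: { 'user.id': 'kimchy' } }, // placeholder query
  script: { source: 'ctx._source.count++', lang: 'painless' }
})

// Check progress (or cancel with client.tasks.cancel()) through the tasks API.
const status = await client.tasks.get({ task_id: String(task) })
```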
-
-**Update the document source**
-
-Update by query supports scripts to update the document source.
-As with the update API, you can set `ctx.op` to change the operation that is performed.
-
-Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes.
-The update by query operation skips updating the document and increments the `noop` counter.
-
-Set `ctx.op = "delete"` if your script decides that the document should be deleted.
-The update by query operation deletes the document and increments the `deleted` counter.
-
-Update by query supports only `index`, `noop`, and `delete`.
-Setting `ctx.op` to anything else is an error.
-Setting any other field in `ctx` is an error.
-This API enables you to only modify the source of matching documents; you cannot move them.
+Refer to the linked documentation for examples of how to update documents using the `_update_by_query` API:

 [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query)

@@ -2795,11 +2805,12 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para
- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries.
- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries.
-- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis") \| Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis")[])**: A list of column names to display.
+It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
-- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values.
+- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values.

 ## client.cat.repositories [_cat.repositories]
 Get snapshot repository information.
@@ -2846,8 +2857,9 @@ client.cat.segments({ ... })
 Supports wildcards (`*`).
 To target all data streams and indices, omit this parameter or use `*` or `_all`.
- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
-- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of column names to display.
+It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
@@ -2876,12 +2888,12 @@ client.cat.shards({ ... })
 Supports wildcards (`*`).
 To target all data streams and indices, omit this parameter or use `*` or `_all`.
- **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values.
-- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \|
"indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.snapshots [_cat.snapshots] Get snapshot information. @@ -2904,7 +2916,8 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. 
+- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of column names to display.
+It supports simple wildcards.
- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
Sorting defaults to ascending and can be changed by setting `:asc`
or `:desc` as a suffix to the column name.
@@ -2985,8 +2998,8 @@ client.cat.threadPool({ ... })

#### Request (object) [_request_cat.thread_pool]
- **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request.
Accepts wildcard expressions.
-- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards.
-- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type") \| Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type")[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. @@ -2994,7 +3007,7 @@ or `:desc` as a suffix to the column name. local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. ## client.cat.transforms [_cat.transforms] Get transform information. @@ -5265,7 +5278,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. -- **`filter` (Optional, string \| { type } \| { type } \| { type, preserve_original } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type, delimiter, encoding } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, pattern, replacement } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. 
+- **`filter` (Optional, string \| { type } \| { type } \| { type } \| { type, preserve_original } \| { type } \| { type } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type } \| { type, delimiter, encoding } \| { type } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, flags, pattern, replacement } \| { type } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. - **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. - **`text` (Optional, string \| string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. @@ -6659,7 +6672,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -6928,14 +6941,38 @@ client.indices.reloadSearchAnalyzers({ index }) - **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable ## client.indices.removeBlock [_indices.remove_block] -Removes a block from an index. +Remove an index block. -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html) +Remove an index block from an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block) ```ts -client.indices.removeBlock() +client.indices.removeBlock({ index, block }) ``` +### Arguments [_arguments_indices.remove_block] + +#### Request (object) [_request_indices.remove_block] +- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are removing blocks from. +To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to remove from the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +It can also be set to `-1` to indicate that the request should never timeout. ## client.indices.resolveCluster [_indices.resolve_cluster] Resolve the cluster. @@ -7563,6 +7600,7 @@ The following integrations are available through the inference API. You can find * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) +* DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) @@ -7634,6 +7672,16 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. 
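For example, a minimal sketch of creating an Amazon Bedrock text embedding endpoint might look like the following. The endpoint ID, credentials, and model are illustrative placeholders, and the `service_settings` keys shown here are an assumption based on the Amazon Bedrock service description; confirm them against the service documentation:

```ts
const resp = await client.inference.putAmazonbedrock({
  task_type: 'text_embedding',
  amazonbedrock_inference_id: 'my-bedrock-embeddings', // placeholder ID
  service: 'amazonbedrock',
  service_settings: {
    access_key: '<aws-access-key>', // placeholder credentials
    secret_key: '<aws-secret-key>',
    region: 'us-east-1',
    provider: 'amazontitan',
    model: 'amazon.titan-embed-text-v2:0' // illustrative model ID
  }
})
```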
+## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker]
+Configure an Amazon SageMaker inference endpoint
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-amazon-sagemaker.html)
+
+```ts
+client.inference.putAmazonsagemaker()
+```
+
+
 ## client.inference.putAnthropic [_inference.put_anthropic]
 Create an Anthropic inference endpoint.
@@ -7732,6 +7780,27 @@ These settings are specific to the `cohere` service.
 - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
 These settings are specific to the task type you specified.

+## client.inference.putDeepseek [_inference.put_deepseek]
+Create a DeepSeek inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `deepseek` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek)
+
+```ts
+client.inference.putDeepseek({ task_type, deepseek_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_deepseek]
+
+#### Request (object) [_request_inference.put_deepseek]
+- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`deepseek_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`.
+- **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model.
+These settings are specific to the `deepseek` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+
 ## client.inference.putElasticsearch [_inference.put_elasticsearch]
 Create an Elasticsearch inference endpoint.
@@ -10997,32 +11066,7 @@ The following functionality is not available:
 `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
 `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.

-**Searching both historical rollup and non-rollup data**
-
-The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data.
-This is done by simply adding the live indices to the URI. For example:
-
-```
-GET sensor-1,sensor_rollup/_rollup_search
-{
-  "size": 0,
-  "aggregations": {
-    "max_temperature": {
-      "max": {
-        "field": "temperature"
-      }
-    }
-  }
-}
-```
-
-The rollup search endpoint does two things when the search runs:
-
-* The original request is sent to the non-rollup index unaltered.
-* A rewritten version of the original request is sent to the rollup index.
-
-When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
-During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
+For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.

 [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search)
@@ -14303,6 +14347,8 @@ If you need to manage more synonym rules, you can create multiple synonym sets.
When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. +For practical examples of how to create or update a synonyms set, refer to the External documentation. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym) ```ts diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8a9c806e1..296d93e83 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -419,6 +419,13 @@ export default class Indices { 'verbose' ] }, + 'indices.get_data_stream_mappings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.get_data_stream_options': { path: [ 'name' @@ -589,6 +596,13 @@ export default class Indices { 'timeout' ] }, + 'indices.put_data_stream_mappings': { + path: [ + 'name' + ], + body: [], + query: [] + }, 'indices.put_data_stream_options': { path: [ 'name' @@ -739,7 +753,13 @@ export default class Indices { 'block' ], body: [], - query: [] + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] }, 'indices.resolve_cluster': { path: [ @@ -2486,6 +2506,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Gets a data stream's mappings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_mappings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Get data stream options. Get the data stream options configuration of one or more data streams. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} @@ -3248,6 +3313,51 @@ export default class Indices { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Updates a data stream's mappings + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + */ + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async putDataStreamMappings (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['indices.put_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_stream_mappings', + pathParts: { + name: params.name + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Update data stream options. Update the data stream options of the specified data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index.html | Elasticsearch API documentation} @@ -3729,13 +3839,13 @@ export default class Indices { } /** - * Removes a block from an index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html | Elasticsearch API documentation} + * Remove an index block. Remove an index block from an index. Index blocks limit the operations allowed on an index by blocking specific operation types. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block | Elasticsearch API documentation} */ - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async removeBlock (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this.acceptedParams['indices.remove_block'] @@ -3753,11 +3863,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index f50074f85..8f0287319 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -127,6 +127,14 @@ export default class Inference { ], query: [] }, + 'inference.put_amazonsagemaker': { + path: [ + 'task_type', + 'amazonsagemaker_inference_id' + ], + body: [], + query: [] + }, 'inference.put_anthropic': { path: [ 'task_type', @@ -179,6 +187,18 @@ export default class Inference { ], query: [] }, + 'inference.put_deepseek': { + path: [ + 'task_type', + 'deepseek_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, 'inference.put_elasticsearch': { path: [ 'task_type', @@ -644,7 +664,7 @@ export default class Inference { } /** - * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. 
You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
+ * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`completion`, `chat_completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`text_embedding`, `rerank`) * Watsonx inference integration (`text_embedding`) * JinaAI (`text_embedding`, `rerank`)
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation}
 */
 async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise
@@ -814,6 +834,52 @@
 return await this.transport.request({ path, method, querystring, body, meta }, options)
 }

+ /**
+ * Configure an Amazon SageMaker inference endpoint
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-service-amazon-sagemaker.html | Elasticsearch API documentation}
+ */
+ async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise
+ async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise>
+ async putAmazonsagemaker (this: That, params?: T.TODO, options?:
TransportRequestOptions): Promise + async putAmazonsagemaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this.acceptedParams['inference.put_amazonsagemaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonsagemaker_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_amazonsagemaker', + pathParts: { + task_type: params.task_type, + amazonsagemaker_inference_id: params.amazonsagemaker_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} @@ -1046,6 +1112,64 @@ export default class Inference { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek | Elasticsearch API documentation} + */ + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_deepseek'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{}
+ // @ts-expect-error
+ body[key] = params[key]
+ }
+ }
+ }
+
+ const method = 'PUT'
+ const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.deepseek_inference_id.toString())}`
+ const meta: TransportRequestMetadata = {
+ name: 'inference.put_deepseek',
+ pathParts: {
+ task_type: params.task_type,
+ deepseek_inference_id: params.deepseek_inference_id
+ }
+ }
+ return await this.transport.request({ path, method, querystring, body, meta }, options)
+ }
+
 /**
 * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
 * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation}
diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts
index 3a27e3549..bc6df9264 100644
--- a/src/api/api/rollup.ts
+++ b/src/api/api/rollup.ts
@@ -368,7 +368,7 @@ export default class Rollup {
 }

 /**
- * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together.
During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 79888212b..d2b3511a5 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -330,7 +330,7 @@ export default class Synonyms { } /** - * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. For practical examples of how to create or update a synonyms set, refer to the External documentation. 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 264881ce8..982b35272 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -62,7 +62,7 @@ const acceptedParams: Record (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index 487bc580a..97097ea65 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -77,7 +77,7 @@ const acceptedParams: Record diff --git a/src/api/types.ts b/src/api/types.ts index c03fd02cd..c5a7bb6b7 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -4488,6 +4488,7 @@ export interface SearchStats { suggest_time?: Duration suggest_time_in_millis: DurationValue suggest_total: long + recent_search_load?: double groups?: Record } @@ -4691,6 +4692,15 @@ export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' export type TimeZone = string +export interface TokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ + tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ + tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ + only_score_pruned_tokens?: boolean +} + export interface TopLeftBottomRightGeoBounds { top_left: GeoLocation bottom_right: GeoLocation @@ -6730,6 +6740,10 @@ export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFil type: 'arabic_normalization' } +export interface AnalysisArabicStemTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_stem' +} + export interface AnalysisArmenianAnalyzer { type: 'armenian' stopwords?: AnalysisStopWords @@ -6757,12 +6771,20 @@ export interface AnalysisBengaliAnalyzer { stem_exclusion?: string[] } +export interface AnalysisBengaliNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'bengali_normalization' +} + export interface AnalysisBrazilianAnalyzer { type: 'brazilian' stopwords?: AnalysisStopWords stopwords_path?: string } +export interface AnalysisBrazilianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'brazilian_stem' +} + export interface AnalysisBulgarianAnalyzer { type: 'bulgarian' stopwords?: AnalysisStopWords @@ -6892,6 +6914,10 @@ export interface AnalysisCzechAnalyzer { stem_exclusion?: string[] } +export interface AnalysisCzechStemTokenFilter extends AnalysisTokenFilterBase { + type: 'czech_stem' +} + export interface AnalysisDanishAnalyzer { type: 'danish' stopwords?: AnalysisStopWords @@ -6923,6 +6949,10 @@ export interface AnalysisDutchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisDutchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'dutch_stem' +} + export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { @@ -6942,7 +6972,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { custom_token_chars?: string max_gram?: integer min_gram?: integer - token_chars?: AnalysisTokenChar[] + token_chars?: string | 
AnalysisTokenChar[] } export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { @@ -7015,6 +7045,10 @@ export interface AnalysisFrenchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisFrenchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'french_stem' +} + export interface AnalysisGalicianAnalyzer { type: 'galician' stopwords?: AnalysisStopWords @@ -7033,6 +7067,10 @@ export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFil type: 'german_normalization' } +export interface AnalysisGermanStemTokenFilter extends AnalysisTokenFilterBase { + type: 'german_stem' +} + export interface AnalysisGreekAnalyzer { type: 'greek' stopwords?: AnalysisStopWords @@ -7109,16 +7147,16 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' alternate?: AnalysisIcuCollationAlternate - case_first?: AnalysisIcuCollationCaseFirst - case_level?: boolean + caseFirst?: AnalysisIcuCollationCaseFirst + caseLevel?: boolean country?: string decomposition?: AnalysisIcuCollationDecomposition - hiragana_quaternary_mode?: boolean + hiraganaQuaternaryMode?: boolean language?: string numeric?: boolean rules?: string strength?: AnalysisIcuCollationStrength - variable_top?: string + variableTop?: string variant?: string } @@ -7457,6 +7495,7 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa type: 'pattern_replace' /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ all?: boolean + flags?: string /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ pattern: string /** Replacement substring. Defaults to an empty substring (`""`). 
*/ @@ -7480,6 +7519,10 @@ export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFi type: 'persian_normalization' } +export interface AnalysisPersianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_stem' +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -7537,6 +7580,10 @@ export interface AnalysisRussianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisRussianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'russian_stem' +} + export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'scandinavian_folding' } @@ -7733,7 +7780,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicStemTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | 
AnalysisBengaliNormalizationTokenFilter | AnalysisBrazilianStemTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisCzechStemTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisDutchStemTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisFrenchStemTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisGermanStemTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPersianStemTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisRussianStemTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -7830,6 +7877,7 @@ export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilte export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { type: 'aggregate_metric_double' default_metric: string + ignore_malformed?: boolean metrics: string[] time_series_metric?: MappingTimeSeriesMetricType } @@ -8099,6 +8147,7 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { null_value?: string similarity?: string split_queries_on_whitespace?: boolean + time_series_dimensions?: string[] type: 'flattened' } @@ -8113,6 +8162,8 @@ export interface MappingFloatRangeProperty extends MappingRangePropertyBase { export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' +export type MappingGeoPointMetricType = 'gauge' | 'counter' | 'position' + export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean @@ -8121,6 +8172,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { 
on_script_error?: MappingOnScriptError script?: Script | ScriptSource type: 'geo_point' + time_series_metric?: MappingGeoPointMetricType } export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { @@ -8298,7 +8350,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { /** Metadata about the field. 
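* This metadata is opaque to Elasticsearch. As an illustrative example, a numeric latency field might set `meta: { unit: 'ms' }` so that client applications know how to render its values.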
*/ @@ -8326,6 +8378,14 @@ export interface MappingRankFeaturesProperty extends MappingPropertyBase { type: 'rank_features' } +export type MappingRankVectorElementType = 'byte' | 'float' | 'bit' + +export interface MappingRankVectorProperty extends MappingPropertyBase { + type: 'rank_vectors' + element_type?: MappingRankVectorElementType + dims?: integer +} + export interface MappingRoutingField { required: boolean } @@ -8429,8 +8489,24 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' +export interface MappingSparseVectorIndexOptions { + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ + prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ + pruning_config?: TokenPruningConfig +} + export interface MappingSparseVectorProperty extends MappingPropertyBase { + store?: boolean type: 'sparse_vector' + /** Additional index options for the sparse vector field that control the + * token pruning behavior of the sparse vector field. */ + index_options?: MappingSparseVectorIndexOptions } export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' @@ -9742,7 +9818,7 @@ export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { * If enabled, this will omit non-significant tokens from the query in order to improve query performance. * This is only used if prune is set to true. * If prune is set to true but pruning_config is not specified, default values will be used. */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export interface QueryDslTermQuery extends QueryDslQueryBase { @@ -9788,20 +9864,11 @@ export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { model_text: string /** Token pruning configurations * @experimental */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' -export interface QueryDslTokenPruningConfig { - /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ - tokens_freq_ratio_threshold?: integer - /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ - tokens_weight_threshold?: float - /** Whether to only score pruned tokens, vs only scoring kept tokens. 
*/ - only_score_pruned_tokens?: boolean -} - export interface QueryDslTypeQuery extends QueryDslQueryBase { value: string } @@ -9825,7 +9892,7 @@ export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { /** The tokens representing this query */ tokens: Record<string, float> | Record<string, float>[] /** Token pruning configurations */ - pruning_config?: QueryDslTokenPruningConfig + pruning_config?: TokenPruningConfig } export interface QueryDslWildcardQuery extends QueryDslQueryBase { @@ -10215,9 +10282,29 @@ export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'compl export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] +export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'rep' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string + +export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string + +export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[] + +export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 
'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'sparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur' | string + +export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] + +export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string + +export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] + +export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string + +export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[] + export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] @@ -13543,13 +13630,14 @@ export interface CatRecoveryRequest extends CatCatRequestBase { bytes?: Bytes /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean - /** List of columns to appear in the response. Supports simple wildcards. 
*/ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatRecoveryColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never } @@ -13601,9 +13689,10 @@ export interface CatSegmentsRequest extends CatCatRequestBase { index?: Indices /** The unit used to display byte values. */ bytes?: Bytes - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatSegmentsColumns + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names @@ -13768,14 +13857,14 @@ export interface CatShardsRequest extends CatCatRequestBase { /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + h?: CatCatShardColumns - /** List of columns that determine how the table should be sorted. + /** A comma-separated list of column names or aliases that determines the sort order. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never } @@ -14405,8 +14494,9 @@ export interface CatSnapshotsRequest extends CatCatRequestBase { repository?: Names /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean - /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatSnapshotsColumns /** List of columns that determine how the table should be sorted. * Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names @@ -14692,8 +14782,8 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { * Accepts wildcard expressions. */ thread_pool_patterns?: Names /** List of columns to appear in the response. Supports simple wildcards. */ - h?: Names - /** List of columns that determine how the table should be sorted. + h?: CatCatThreadPoolColumns + /** A comma-separated list of column names or aliases that determines the sort order. 
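+ * For example (hypothetical values), `s: ['node_name', 'rejected:desc']` sorts rows by node name ascending, then by rejected task count descending.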
* Sorting defaults to ascending and can be changed by setting `:asc` * or `:desc` as a suffix to the column name. */ s?: Names @@ -14704,7 +14794,7 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { * from the cluster state of the master node. In both cases the coordinating * node will send requests for further information to each selected node. */ local?: boolean - /** Period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never } @@ -19119,6 +19209,8 @@ export interface IndicesIndexingSlowlogTresholds { index?: IndicesSlowlogTresholdLevels } +export type IndicesIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' + export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged' export interface IndicesMappingLimitSettings { @@ -19318,6 +19410,8 @@ export interface IndicesStorage { * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot * of memory maps so you need disable the ability to use memory-mapping. */ allow_mmap?: boolean + /** How often store statistics are refreshed */ + stats_refresh_interval?: Duration } export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string @@ -19363,9 +19457,7 @@ export interface IndicesTranslogRetention { age?: Duration } -export type IndicesAddBlockIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write' - -export interface IndicesAddBlockIndicesBlockStatus { +export interface IndicesAddBlockAddIndicesBlockStatus { name: IndexName blocked: boolean } @@ -19377,7 +19469,7 @@ export interface IndicesAddBlockRequest extends RequestBase { * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName /** The block type to add to the index. */ - block: IndicesAddBlockIndicesBlockOptions + block: IndicesIndicesBlockOptions /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ @@ -19405,7 +19497,7 @@ export interface IndicesAddBlockRequest extends RequestBase { export interface IndicesAddBlockResponse { acknowledged: boolean shards_acknowledged: boolean - indices: IndicesAddBlockIndicesBlockStatus[] + indices: IndicesAddBlockAddIndicesBlockStatus[] } export interface IndicesAnalyzeAnalyzeDetail { @@ -21195,6 +21287,49 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult +export interface IndicesRemoveBlockRemoveIndicesBlockStatus { + name: IndexName + unblocked?: boolean + exception?: ErrorCause +} + +export interface IndicesRemoveBlockRequest extends RequestBase { + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are removing blocks from. 
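+ * As a minimal usage sketch (hypothetical index name), `client.indices.removeBlock({ index: 'my-index', block: 'write' })` lifts a previously added `write` block; the response reports a per-index `unblocked` status.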
+ * To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ + index: IndexName + /** The block type to remove from the index. */ + block: IndicesIndicesBlockOptions + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } +} + +export interface IndicesRemoveBlockResponse { + acknowledged: boolean + indices: IndicesRemoveBlockRemoveIndicesBlockStatus[] +} + export interface IndicesResolveClusterRequest extends RequestBase { /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. * Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. * @@ -22324,6 +22459,25 @@ export interface InferenceContentObject { type: string } +export interface InferenceDeepSeekServiceSettings { + /** A valid API key for your DeepSeek account. + * You can find or create your DeepSeek API keys on the DeepSeek API key page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** For a `completion` or `chat_completion` task, the name of the model to use for the inference task. 
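+ * For example, `deepseek-chat` (a hedged example; confirm current model names against the documentation linked below).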
+ * + * For the available `completion` and `chat_completion` models, refer to the [DeepSeek Models & Pricing docs](https://api-docs.deepseek.com/quick_start/pricing). */ + model_id: string + /** The URL endpoint to use for the requests. Defaults to `https://api.deepseek.com/chat/completions`. */ + url?: string +} + +export type InferenceDeepSeekServiceType = 'deepseek' + export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { pipelines: string[] } @@ -22546,6 +22700,13 @@ export interface InferenceInferenceEndpointInfoCohere extends InferenceInference task_type: InferenceTaskTypeCohere } +export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeDeepSeek +} + export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { /** The inference Id */ inference_id: string @@ -22899,6 +23060,8 @@ export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion' + export type InferenceTaskTypeELSER = 'sparse_embedding' export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank' @@ -23259,6 +23422,26 @@ export interface InferencePutCohereRequest extends RequestBase { export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere +export interface InferencePutDeepseekRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeDeepSeek + /** The unique identifier of the inference endpoint. */ + deepseek_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `deepseek`. */ + service: InferenceDeepSeekServiceType + /** Settings used to install the inference model. + * These settings are specific to the `deepseek` service. */ + service_settings: InferenceDeepSeekServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek + export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ task_type: InferenceElasticsearchTaskType @@ -30537,15 +30720,16 @@ export interface NodesInfoNodeInfo { /** Short hash of the last git commit in this release. */ build_hash: string build_type: string + component_versions: Record<Name, integer> /** The node’s host name. */ host: Host http?: NodesInfoNodeInfoHttp + index_version: VersionNumber /** The node’s IP address. 
*/ ip: Ip jvm?: NodesInfoNodeJvmInfo /** The node's name */ name: Name - network?: NodesInfoNodeInfoNetwork os?: NodesInfoNodeOperatingSystemInfo plugins?: PluginStats[] process?: NodesInfoNodeProcessInfo @@ -30559,11 +30743,13 @@ export interface NodesInfoNodeInfo { transport?: NodesInfoNodeInfoTransport /** Host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress + transport_version: VersionNumber /** Elasticsearch version running on this node. */ version: VersionString modules?: PluginStats[] ingest?: NodesInfoNodeInfoIngest aggregations?: Record<string, NodesInfoNodeInfoAggregation> + remote_cluster_server?: NodesInfoRemoteClusterServer } export interface NodesInfoNodeInfoAction { @@ -30583,7 +30769,7 @@ export interface NodesInfoNodeInfoClient { } export interface NodesInfoNodeInfoDiscoverKeys { - seed_hosts?: string[] + seed_hosts?: string[] | string type?: string seed_providers?: string[] } @@ -30631,17 +30817,6 @@ export interface NodesInfoNodeInfoMemory { total_in_bytes: long } -export interface NodesInfoNodeInfoNetwork { - primary_interface: NodesInfoNodeInfoNetworkInterface - refresh_interval: integer -} - -export interface NodesInfoNodeInfoNetworkInterface { - address: string - mac_address: string - name: Name -} - export interface NodesInfoNodeInfoOSCPU { cache_size: string cache_size_in_bytes: integer @@ -30703,7 +30878,7 @@ export interface NodesInfoNodeInfoSettingsCluster { name: Name routing?: IndicesIndexRouting election: NodesInfoNodeInfoSettingsClusterElection - initial_master_nodes?: string[] + initial_master_nodes?: string[] | string deprecation_indexing?: NodesInfoDeprecationIndexing } @@ -30773,6 +30948,8 @@ export interface NodesInfoNodeInfoSettingsTransport { type: NodesInfoNodeInfoSettingsTransportType | string 'type.default'?: string features?: NodesInfoNodeInfoSettingsTransportFeatures + /** Only used in unit tests */ + ignore_deserialization_errors?: SpecUtilsStringified<boolean> } export interface NodesInfoNodeInfoSettingsTransportFeatures { @@ -30850,8 +31027,6 @@ export interface NodesInfoNodeJvmInfo { vm_vendor: string vm_version: VersionString using_bundled_jdk: boolean - /** @alias using_bundled_jdk */ - bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } @@ -30893,6 +31068,11 @@ export interface NodesInfoNodeThreadPoolInfo { type: string } +export interface NodesInfoRemoteClusterServer { + bound_address: TransportAddress[] + publish_address: TransportAddress +} + export interface NodesInfoRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds
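/* A hedged usage sketch: `client.nodes.info({ node_id: '_local' })` resolves to a per-node map whose entries include fields such as `component_versions`, `index_version`, `transport_version`, and, when the remote cluster server is enabled, `remote_cluster_server`. */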