diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index f49614716..8cbbf20d9 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -747,7 +747,7 @@ client.get({ id, index }) - **`id` (string)**: A unique document identifier. - **`index` (string)**: The name of the index that contains the document. -- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. - **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. - **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). @@ -755,7 +755,7 @@ client.get({ id, index }) - **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. -- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. 
Object fields can't be returned; if specified, the request fails. - **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte" \| "force"))**: The version type. @@ -1857,6 +1857,7 @@ The document must still be reindexed, but using this API removes some network ro The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). +For usage examples such as partial updates, upserts, and scripted updates, see the External documentation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update) @@ -1914,6 +1915,30 @@ A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. +**Refreshing shards** + +Specifying the `refresh` parameter refreshes all shards once the request completes. +This is different to the update API's `refresh` parameter, which causes only the shard +that received the request to be refreshed. Unlike the update API, it does not support +`wait_for`. + +**Running update by query asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch +performs some preflight checks, launches the request, and returns a +[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. + +**Waiting for active shards** + +`wait_for_active_shards` controls how many copies of a shard must be active +before proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards) +for details. `timeout` controls how long each write request waits for unavailable +shards to become available. Both work exactly the way they work in the +[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also +specify the `scroll` parameter to control how long it keeps the search context +alive, for example `?scroll=10m`. The default is 5 minutes. + **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. @@ -1958,22 +1983,7 @@ If you're slicing manually or otherwise tuning automatic slicing, keep in mind t * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. - -**Update the document source** - -Update by query supports scripts to update the document source. -As with the update API, you can set `ctx.op` to change the operation that is performed. - -Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. -The update by query operation skips updating the document and increments the `noop` counter. 
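A minimal sketch of the asynchronous flow described under **Running update by query asynchronously**, assuming a connected `client` and a hypothetical `my-index` (the query and script values are illustrative):

```ts
// Launch an update by query without waiting for it to finish.
// The response then carries a task ID instead of the usual statistics.
const { task } = await client.updateByQuery({
  index: 'my-index',
  wait_for_completion: false,
  conflicts: 'proceed',
  query: { term: { 'user.id': 'kimchy' } },                   // illustrative query
  script: { source: 'ctx._source.count++', lang: 'painless' } // illustrative script
})

if (task != null) {
  // Poll the task record (or cancel it with client.tasks.cancel).
  const status = await client.tasks.get({ task_id: String(task) })
  console.log(status.completed, status.task.status)
}
```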
- -Set `ctx.op = "delete"` if your script decides that the document should be deleted. -The update by query operation deletes the document and increments the `deleted` counter. - -Update by query supports only `index`, `noop`, and `delete`. -Setting `ctx.op` to anything else is an error. -Setting any other field in `ctx` is an error. -This API enables you to only modify the source of matching documents; you cannot move them. +Refer to the linked documentation for examples of how to update documents using the `_update_by_query` API: [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query) @@ -2795,11 +2805,12 @@ Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. - **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis") \| Enum("index" \| "shard" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis")[])**: A list of column names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.repositories [_cat.repositories] Get snapshot repository information. @@ -2846,8 +2857,9 @@ client.cat.segments({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+- **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of column names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the @@ -2876,12 +2888,12 @@ client.cat.shards({ ... }) Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - **`bytes` (Optional, Enum("b" \| "kb" \| "mb" \| "gb" \| "tb" \| "pb"))**: The unit used to display byte values. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total"
\| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. -- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: Unit used to display time values. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. ## client.cat.snapshots [_cat.snapshots] Get snapshot information. @@ -2904,7 +2916,8 @@ Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. 
+- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason")[])**: A list of column names to display. +It supports simple wildcards. - **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -2985,8 +2998,8 @@ client.cat.threadPool({ ... }) #### Request (object) [_request_cat.thread_pool] - **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request. Accepts wildcard expressions. -- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. -- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +- **`h` (Optional, Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type") \| Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type")[])**: List of columns to appear in the response. Supports simple wildcards.
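The typed `h` columns above and the `s` sort suffix described next combine as in this minimal sketch (assuming a connected `client`; the chosen columns are illustrative):

```ts
// Request specific thread pool columns, sorted by rejected tasks, descending.
// `format: 'json'` is a common cat parameter that returns structured rows
// instead of plain-text columns.
const pools = await client.cat.threadPool({
  h: ['name', 'active', 'queue', 'rejected'],
  s: 'rejected:desc',
  format: 'json'
})
console.log(pools)
```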
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. - **`time` (Optional, Enum("nanos" \| "micros" \| "ms" \| "s" \| "m" \| "h" \| "d"))**: The unit used to display time values. @@ -2994,7 +3007,7 @@ or `:desc` as a suffix to the column name. local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. ## client.cat.transforms [_cat.transforms] Get transform information. @@ -4650,6 +4663,7 @@ A query ID is provided in the ES|QL async query API response for a query that do A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. - **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. - **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. - **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish. @@ -5235,7 +5249,7 @@ This could be a built-in analyzer, or an analyzer that’s been configured in th - **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. 
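The `filter` parameter described next accepts both built-in filter names and inline filter definitions, as in this minimal sketch (the n-gram settings and sample text are illustrative):

```ts
// Analyze text with a standard tokenizer, one named filter, and one inline filter.
const analyzed = await client.indices.analyze({
  tokenizer: 'standard',
  filter: [
    'lowercase',                                      // built-in filter by name
    { type: 'edge_ngram', min_gram: 2, max_gram: 5 }  // inline definition
  ],
  text: 'Elasticsearch token filters'
})
console.log(analyzed.tokens?.map(t => t.token))
```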
-- **`filter` (Optional, string \| { type } \| { type } \| { type, preserve_original } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type, delimiter, encoding } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, pattern, replacement } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. 
+- **`filter` (Optional, string \| { type } \| { type } \| { type } \| { type, preserve_original } \| { type } \| { type } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type } \| { type, delimiter, encoding } \| { type } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, flags, pattern, replacement } \| { type } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. - **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. - **`text` (Optional, string \| string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. @@ -6519,7 +6533,7 @@ a new date field is added instead of string. not used at all by Elasticsearch, but can be used to store application-specific metadata. - **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. -- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type @@ -10132,7 +10146,7 @@ restart the model deployment. ## client.ml.updateDataFrameAnalytics [_ml.update_data_frame_analytics] Update a data frame analytics job. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics) ```ts client.ml.updateDataFrameAnalytics({ id }) @@ -10826,32 +10840,7 @@ The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. -**Searching both historical rollup and non-rollup data** - -The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. -This is done by simply adding the live indices to the URI. For example: - -``` -GET sensor-1,sensor_rollup/_rollup_search -{ - "size": 0, - "aggregations": { - "max_temperature": { - "max": { - "field": "temperature" - } - } - } -} -``` - -The rollup search endpoint does two things when the search runs: - -* The original request is sent to the non-rollup index unaltered. -* A rewritten version of the original request is sent to the rollup index. - -When the two responses are received, the endpoint rewrites the rollup response and merges the two together. -During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. +For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-rollup-search) @@ -14049,6 +14038,8 @@ If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. +For practical examples of how to create or update a synonyms set, refer to the External documentation. + [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym) ```ts diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index f2dd81d62..53e5789e8 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -70,6 +70,7 @@ export default class Esql { body: [], query: [ 'drop_null_columns', + 'format', 'keep_alive', 'wait_for_completion_timeout' ] diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index a1d13bebe..76bbde531 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -4382,7 +4382,7 @@ export default class Ml { /** * Update a data frame analytics job. 
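+ * @example
+ * // A hypothetical sketch; the job ID and new memory limit are illustrative:
+ * await client.ml.updateDataFrameAnalytics({ id: 'my-analytics-job', model_memory_limit: '1gb' })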
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index f130e2aa7..dd175dded 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -368,7 +368,7 @@ export default class Rollup { } /** - * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. 
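+ * @example
+ * // A minimal sketch: query live (`sensor-1`) and rolled-up (`sensor_rollup`)
+ * // data together; the index and field names are illustrative:
+ * await client.rollup.rollupSearch({
+ *   index: 'sensor-1,sensor_rollup',
+ *   size: 0,
+ *   aggregations: { max_temperature: { max: { field: 'temperature' } } }
+ * })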
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-rollup-search | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 99e4730a4..99e7f18b1 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -324,7 +324,7 @@ export default class Synonyms { } /** - * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. For practical examples of how to create or update a synonyms set, refer to the External documentation. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/update.ts b/src/api/api/update.ts index ac8afe575..7ccb98e6c 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -62,7 +62,7 @@ const acceptedParams: Record (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index d17e16d1b..27e506317 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -77,7 +77,7 @@ const acceptedParams: Record diff --git a/src/api/types.ts b/src/api/types.ts index 24536477a..4a58bcb3b 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -812,7 +812,7 @@ export interface GetRequest extends RequestBase { /** The name of the index that contains the document. */ index: IndexName /** Indicates whether the request forces synthetic `_source`. - * Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. + * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean /** The node or shard the operation should be performed on. @@ -844,8 +844,8 @@ export interface GetRequest extends RequestBase { /** A comma-separated list of stored fields to return as part of a hit. * If no fields are specified, no stored fields are included in the response. * If this field is specified, the `_source` parameter defaults to `false`. - * Only leaf fields can be retrieved with the `stored_field` option. - * Object fields can't be returned;if specified, the request fails. */ + * Only leaf fields can be retrieved with the `stored_fields` option. 
+ * Object fields can't be returned; if specified, the request fails. */ stored_fields?: Fields /** The version number for concurrency control. * It must match the current version of the document for the request to succeed. */ @@ -6711,6 +6711,10 @@ export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFil type: 'arabic_normalization' } +export interface AnalysisArabicStemTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_stem' +} + export interface AnalysisArmenianAnalyzer { type: 'armenian' stopwords?: AnalysisStopWords @@ -6738,12 +6742,20 @@ export interface AnalysisBengaliAnalyzer { stem_exclusion?: string[] } +export interface AnalysisBengaliNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'bengali_normalization' +} + export interface AnalysisBrazilianAnalyzer { type: 'brazilian' stopwords?: AnalysisStopWords stopwords_path?: string } +export interface AnalysisBrazilianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'brazilian_stem' +} + export interface AnalysisBulgarianAnalyzer { type: 'bulgarian' stopwords?: AnalysisStopWords @@ -6873,6 +6885,10 @@ export interface AnalysisCzechAnalyzer { stem_exclusion?: string[] } +export interface AnalysisCzechStemTokenFilter extends AnalysisTokenFilterBase { + type: 'czech_stem' +} + export interface AnalysisDanishAnalyzer { type: 'danish' stopwords?: AnalysisStopWords @@ -6904,6 +6920,10 @@ export interface AnalysisDutchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisDutchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'dutch_stem' +} + export type AnalysisEdgeNGramSide = 'front' | 'back' export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { @@ -6923,7 +6943,7 @@ export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { custom_token_chars?: string max_gram?: integer min_gram?: integer - token_chars?: AnalysisTokenChar[] + token_chars?: string | AnalysisTokenChar[] } export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { @@ -6996,6 +7016,10 @@ export interface AnalysisFrenchAnalyzer { stem_exclusion?: string[] } +export interface AnalysisFrenchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'french_stem' +} + export interface AnalysisGalicianAnalyzer { type: 'galician' stopwords?: AnalysisStopWords @@ -7014,6 +7038,10 @@ export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFil type: 'german_normalization' } +export interface AnalysisGermanStemTokenFilter extends AnalysisTokenFilterBase { + type: 'german_stem' +} + export interface AnalysisGreekAnalyzer { type: 'greek' stopwords?: AnalysisStopWords @@ -7090,16 +7118,16 @@ export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { type: 'icu_collation' alternate?: AnalysisIcuCollationAlternate - case_first?: AnalysisIcuCollationCaseFirst - case_level?: boolean + caseFirst?: AnalysisIcuCollationCaseFirst + caseLevel?: boolean country?: string decomposition?: AnalysisIcuCollationDecomposition - hiragana_quaternary_mode?: boolean + hiraganaQuaternaryMode?: boolean language?: string numeric?: boolean rules?: string strength?: AnalysisIcuCollationStrength - variable_top?: string + variableTop?: string variant?: string } @@ -7438,6 +7466,7 @@ export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBa type: 'pattern_replace' /** If `true`, all substrings matching the pattern parameter’s 
regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ all?: boolean + flags?: string /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ pattern: string /** Replacement substring. Defaults to an empty substring (`""`). */ @@ -7461,6 +7490,10 @@ export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFi type: 'persian_normalization' } +export interface AnalysisPersianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_stem' +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -7518,6 +7551,10 @@ export interface AnalysisRussianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisRussianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'russian_stem' +} + export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'scandinavian_folding' } @@ -7714,7 +7751,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | 
AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicStemTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisBengaliNormalizationTokenFilter | AnalysisBrazilianStemTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisCzechStemTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisDutchStemTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisFrenchStemTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisGermanStemTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPersianStemTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisRussianStemTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -7811,6 +7848,7 @@ export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilte export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { type: 'aggregate_metric_double' default_metric: string + ignore_malformed?: boolean metrics: string[] time_series_metric?: MappingTimeSeriesMetricType } @@ -8054,6 +8092,7 @@ export interface MappingFlattenedProperty extends MappingPropertyBase { null_value?: string similarity?: string split_queries_on_whitespace?: boolean + time_series_dimensions?: string[] type: 'flattened' } @@ -8068,6 +8107,8 @@ export interface MappingFloatRangeProperty extends MappingRangePropertyBase { 
export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' +export type MappingGeoPointMetricType = 'gauge' | 'counter' | 'position' + export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean ignore_z_value?: boolean @@ -8076,6 +8117,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { on_script_error?: MappingOnScriptError script?: Script | ScriptSource type: 'geo_point' + time_series_metric?: MappingGeoPointMetricType } export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { @@ -8253,7 +8295,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { type: 'point' } -export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | 
MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { /** Metadata about the field. */ @@ -8281,6 +8323,14 @@ export interface MappingRankFeaturesProperty extends MappingPropertyBase { type: 'rank_features' } +export type MappingRankVectorElementType = 'byte' | 'float' | 'bit' + +export interface MappingRankVectorProperty extends MappingPropertyBase { + type: 'rank_vectors' + element_type?: MappingRankVectorElementType + dims?: integer +} + export interface MappingRoutingField { required: boolean } @@ -8374,6 +8424,7 @@ export interface MappingSourceField { export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' export interface MappingSparseVectorProperty extends MappingPropertyBase { + store?: boolean type: 'sparse_vector' } @@ -10161,9 +10212,29 @@ export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'compl export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] +export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'tnode' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string + +export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[] + export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { } +export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string + +export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[] + +export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 
@@ -10161,9 +10212,29 @@ export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'compl

export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[]

+export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'time' | 't' | 'ti' | 'primaryOrReplica' | 'type' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'rep' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | string
+
+export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[]
+
export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters {
}

+export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string
+
+export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[]
+
+export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'sparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur' | string
+
+export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[]
+
+export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string
+
+export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[]
+
+export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string
+
+export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[]
+
export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v'

export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[]
@@ -13489,13 +13560,14 @@ export interface CatRecoveryRequest extends CatCatRequestBase {
  bytes?: Bytes
  /** If `true`, the response includes detailed information about shard recoveries. */
  detailed?: boolean
-  /** List of columns to appear in the response. Supports simple wildcards. */
-  h?: Names
-  /** List of columns that determine how the table should be sorted.
+  /** A comma-separated list of column names to display.
+   * It supports simple wildcards. */
+  h?: CatCatRecoveryColumns
+  /** A comma-separated list of column names or aliases that determines the sort order.
   * Sorting defaults to ascending and can be changed by setting `:asc`
   * or `:desc` as a suffix to the column name. */
  s?: Names
-  /** Unit used to display time values. */
+  /** The unit used to display time values. */
  time?: TimeUnit
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { index?: never, active_only?: never, bytes?: never, detailed?: never, h?: never, s?: never, time?: never }
@@ -13547,9 +13619,10 @@ export interface CatSegmentsRequest extends CatCatRequestBase {
  index?: Indices
  /** The unit used to display byte values. */
  bytes?: Bytes
-  /** List of columns to appear in the response. Supports simple wildcards. */
-  h?: Names
-  /** List of columns that determine how the table should be sorted.
+  /** A comma-separated list of column names to display.
+   * It supports simple wildcards. */
+  h?: CatCatSegmentsColumns
+  /** A comma-separated list of column names or aliases that determines the sort order.
   * Sorting defaults to ascending and can be changed by setting `:asc`
   * or `:desc` as a suffix to the column name. */
  s?: Names
@@ -13714,14 +13787,14 @@ export interface CatShardsRequest extends CatCatRequestBase {
  /** The unit used to display byte values. */
  bytes?: Bytes
  /** List of columns to appear in the response. Supports simple wildcards. */
-  h?: Names
-  /** List of columns that determine how the table should be sorted.
+  h?: CatCatShardColumns
+  /** A comma-separated list of column names or aliases that determines the sort order.
   * Sorting defaults to ascending and can be changed by setting `:asc`
   * or `:desc` as a suffix to the column name. */
  s?: Names
-  /** Period to wait for a connection to the master node. */
+  /** The period to wait for a connection to the master node. */
  master_timeout?: Duration
-  /** Unit used to display time values. */
+  /** The unit used to display time values. */
  time?: TimeUnit
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { index?: never, bytes?: never, h?: never, s?: never, master_timeout?: never, time?: never }
@@ -14351,8 +14424,9 @@ export interface CatSnapshotsRequest extends CatCatRequestBase {
  repository?: Names
  /** If `true`, the response does not include information from unavailable snapshots. */
  ignore_unavailable?: boolean
-  /** List of columns to appear in the response. Supports simple wildcards. */
-  h?: Names
+  /** A comma-separated list of column names to display.
+   * It supports simple wildcards. */
+  h?: CatCatSnapshotsColumns
  /** List of columns that determine how the table should be sorted.
   * Sorting defaults to ascending and can be changed by setting `:asc`
   * or `:desc` as a suffix to the column name. */
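With the request interfaces above, `h` is now checked against the matching column union while `s` stays free-form `Names`. A sketch of a recovery call, again with a hypothetical index:

```ts
const recovery = await client.cat.recovery({
  index: 'my-index',
  h: ['index', 'shard', 'stage', 'files_percent', 'bytes_percent'], // CatCatRecoveryColumns
  s: 'index:asc', // sort keys are still plain Names
  time: 's',      // TimeUnit for time-valued columns
  format: 'json'
})
```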
@@ -14638,8 +14712,8 @@ export interface CatThreadPoolRequest extends CatCatRequestBase {
   * Accepts wildcard expressions. */
  thread_pool_patterns?: Names
  /** List of columns to appear in the response. Supports simple wildcards. */
-  h?: Names
-  /** List of columns that determine how the table should be sorted.
+  h?: CatCatThreadPoolColumns
+  /** A comma-separated list of column names or aliases that determines the sort order.
   * Sorting defaults to ascending and can be changed by setting `:asc`
   * or `:desc` as a suffix to the column name. */
  s?: Names
@@ -14650,7 +14724,7 @@ export interface CatThreadPoolRequest extends CatCatRequestBase {
   * from the cluster state of the master node. In both cases the coordinating
   * node will send requests for further information to each selected node. */
  local?: boolean
-  /** Period to wait for a connection to the master node. */
+  /** The period to wait for a connection to the master node. */
  master_timeout?: Duration
  /** All values in `body` will be added to the request body. */
  body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, time?: never, local?: never, master_timeout?: never }
@@ -17782,6 +17856,8 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase {
  /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
   * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */
  drop_null_columns?: boolean
+  /** A short version of the Accept header, for example `json` or `yaml`. */
+  format?: EsqlEsqlFormat
  /** The period for which the query and its results are stored in the cluster.
   * When this period expires, the query and its results are deleted, even if the query is still ongoing. */
  keep_alive?: Duration
@@ -17791,9 +17867,9 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase {
   * Otherwise, the response returns an `is_running` value of `true` and no results. */
  wait_for_completion_timeout?: Duration
  /** All values in `body` will be added to the request body. */
-  body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
+  body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never }
  /** All values in `querystring` will be added to the request querystring. */
-  querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never }
+  querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never }
}

export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult
@@ -19155,6 +19231,8 @@ export interface IndicesStorage {
   * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot
   * of memory maps so you need disable the ability to use memory-mapping. */
  allow_mmap?: boolean
+  /** How often store statistics are refreshed */
+  stats_refresh_interval?: Duration
}

export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string
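The new `format` field on `EsqlAsyncQueryGetRequest` mirrors the Accept header, so a stored async ES|QL result can be fetched as CSV, TSV, or YAML instead of JSON only. A sketch, with a placeholder id standing in for one returned by `esql.asyncQuery`:

```ts
// 'my-async-query-id' is a placeholder, not a real task id.
const csv = await client.esql.asyncQueryGet({
  id: 'my-async-query-id',
  format: 'csv',
  wait_for_completion_timeout: '2s' // past this, the response reports is_running: true instead
})
```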
@@ -20017,10 +20095,6 @@ export interface IndicesGetRequest extends RequestBase {

export type IndicesGetResponse = Record<IndexName, IndicesIndexState>

-export interface IndicesGetAliasIndexAliases {
-  aliases: Record<string, IndicesAliasDefinition>
-}
-
export interface IndicesGetAliasRequest extends RequestBase {
  /** Comma-separated list of aliases to retrieve.
   * Supports wildcards (`*`).
@@ -20050,6 +20124,17 @@ export interface IndicesGetAliasRequest extends RequestBase {

export type IndicesGetAliasResponse = Record<IndexName, IndicesGetAliasIndexAliases>

+export interface IndicesGetAliasIndexAliases {
+  aliases: Record<string, IndicesAliasDefinition>
+}
+
+export interface IndicesGetAliasNotFoundAliasesKeys {
+  error: string
+  status: number
+}
+export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys
+& { [property: string]: IndicesGetAliasIndexAliases | string | number }
+
export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
  name: DataStreamName
  lifecycle?: IndicesDataStreamLifecycleWithRollover
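`IndicesGetAliasNotFoundAliases` models the mixed body Elasticsearch returns when some requested aliases do not exist: top-level `error` and `status` properties sit alongside the per-index alias maps. A hedged sketch of telling the two shapes apart, assuming hypothetical alias names and using the transport's `ignore` option so the 404 body is returned rather than thrown:

```ts
const res = await client.indices.getAlias(
  { name: ['my-alias', 'missing-alias'] }, // hypothetical aliases
  { ignore: [404] }                        // surface the not-found body instead of an error
)
if (typeof (res as { status?: unknown }).status === 'number') {
  // IndicesGetAliasNotFoundAliases: partial results plus error/status
  console.log((res as unknown as { error: string }).error)
}
```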
@@ -30196,15 +30281,16 @@ export interface NodesInfoNodeInfo {
  /** Short hash of the last git commit in this release. */
  build_hash: string
  build_type: string
+  component_versions: Record<Name, VersionNumber>
  /** The node’s host name. */
  host: Host
  http?: NodesInfoNodeInfoHttp
+  index_version: VersionNumber
  /** The node’s IP address. */
  ip: Ip
  jvm?: NodesInfoNodeJvmInfo
  /** The node's name */
  name: Name
-  network?: NodesInfoNodeInfoNetwork
  os?: NodesInfoNodeOperatingSystemInfo
  plugins?: PluginStats[]
  process?: NodesInfoNodeProcessInfo
@@ -30218,11 +30304,13 @@ export interface NodesInfoNodeInfo {
  transport?: NodesInfoNodeInfoTransport
  /** Host and port where transport HTTP connections are accepted. */
  transport_address: TransportAddress
+  transport_version: VersionNumber
  /** Elasticsearch version running on this node. */
  version: VersionString
  modules?: PluginStats[]
  ingest?: NodesInfoNodeInfoIngest
  aggregations?: Record<string, NodesInfoNodeInfoAggregation>
+  remote_cluster_server?: NodesInfoRemoveClusterServer
}

export interface NodesInfoNodeInfoAction {
@@ -30242,7 +30330,7 @@ export interface NodesInfoNodeInfoClient {
}

export interface NodesInfoNodeInfoDiscoverKeys {
-  seed_hosts?: string[]
+  seed_hosts?: string[] | string
  type?: string
  seed_providers?: string[]
}
@@ -30290,17 +30378,6 @@ export interface NodesInfoNodeInfoMemory {
  total_in_bytes: long
}

-export interface NodesInfoNodeInfoNetwork {
-  primary_interface: NodesInfoNodeInfoNetworkInterface
-  refresh_interval: integer
-}
-
-export interface NodesInfoNodeInfoNetworkInterface {
-  address: string
-  mac_address: string
-  name: Name
-}
-
export interface NodesInfoNodeInfoOSCPU {
  cache_size: string
  cache_size_in_bytes: integer
@@ -30362,7 +30439,7 @@ export interface NodesInfoNodeInfoSettingsCluster {
  name: Name
  routing?: IndicesIndexRouting
  election: NodesInfoNodeInfoSettingsClusterElection
-  initial_master_nodes?: string[]
+  initial_master_nodes?: string[] | string
  deprecation_indexing?: NodesInfoDeprecationIndexing
}
@@ -30432,6 +30509,8 @@ export interface NodesInfoNodeInfoSettingsTransport {
  type: NodesInfoNodeInfoSettingsTransportType | string
  'type.default'?: string
  features?: NodesInfoNodeInfoSettingsTransportFeatures
+  /** Only used in unit tests */
+  ignore_deserialization_errors?: SpecUtilsStringified<boolean>
}

export interface NodesInfoNodeInfoSettingsTransportFeatures {
@@ -30509,8 +30588,6 @@ export interface NodesInfoNodeJvmInfo {
  vm_vendor: string
  vm_version: VersionString
  using_bundled_jdk: boolean
-  /** @alias using_bundled_jdk */
-  bundled_jdk: boolean
  using_compressed_ordinary_object_pointers?: boolean | string
  input_arguments: string[]
}
@@ -30552,6 +30629,11 @@ export interface NodesInfoNodeThreadPoolInfo {
  type: string
}

+export interface NodesInfoRemoveClusterServer {
+  bound_address: TransportAddress[]
+  publish_address: TransportAddress
+}
+
export interface NodesInfoRequest extends RequestBase {
  /** Comma-separated list of node IDs or names used to limit returned information. */
  node_id?: NodeIds
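Reading the new `NodesInfoNodeInfo` fields is straightforward; `remote_cluster_server` is optional, so a defensive sketch (the `_local` node filter is just illustrative):

```ts
const info = await client.nodes.info({ node_id: '_local' })
for (const [id, node] of Object.entries(info.nodes)) {
  // version, transport_version, and index_version are now reported side by side
  console.log(id, node.version, node.transport_version, node.index_version)
  if (node.remote_cluster_server) {
    console.log('remote cluster server at', node.remote_cluster_server.publish_address)
  }
}
```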